#$Id: //depot/prod/DOT/dev/test/nate/lib/TCD.pm#1 $
#
# Copyright (c) 2005-2013 NetApp, Inc., All Rights Reserved.
# Any use, modification, or distribution is prohibited
# without prior written consent from NetApp, Inc.
#
# TCD - Test Case Driver for NATE test scripts (.thpl). Parses the
# TESTCASES/ARGS command-line parameter, schedules init/setup/testcase/
# cleanup/uninit methods defined in package main, and records results.
package TCD;

use vars qw( @StdMethods $TestCase_Handle );

BEGIN{
    require Exporter;
    our @ISA = qw(Exporter);
    our @EXPORT_OK=qw(testcases);
}

#
# Export Global variables
#
# Test status codes. Negative values are error/abnormal states; testcase
# status comparisons elsewhere rely on these exact numeric values.
$TCD::WARN     = -9;
$TCD::SCRIPT   = -8;
$TCD::CONFIG   = -7;
$TCD::NA       = -6;
$TCD::FATAL    = -5;
$TCD::UNDEF    = -4;
$TCD::DISABLE  = -1;
$TCD::PASS     = 1;
$TCD::FAIL     = 0;
$TCD::TRUE     = 1;
$TCD::FALSE    = 0;
$TCD::POSITIVE = 1;
$TCD::NEGATIVE = 0;

#
# Compiler directives
#
use v5.14;
use strict;

#
# Module import
#
use POSIX qw(strftime);
use Cwd qw(abs_path getcwd);
use Time::Local;
use Time::HiRes qw(gettimeofday);
use IO::Tee;
use IO::File;
use Tie::IxHash;
use File::Basename qw(fileparse);
use File::stat;
use Tharn qw($Log);
use NATE::Util qw(trim perlmsg_lookup strip_comments);
use NATE::Util::DataTypes qw(is_blessed);
use NATE::Events qw(call_on_exit_early_add call_on_fork_add);
use NATE::Database::Test ();
use NATE::ParamSet ();
use NATE::Process;
use Params::Validate qw(validate_with BOOLEAN);

#
# List of files we inherit from
#
use base qw(TCD::Err);

# Shared formatting / classification constants.
my $Border = '='x80;
# Method-name classifiers: setup-like, cleanup-like, any config method.
my $Setup_Regex = qr/^(?:init|setup)$/io;
my $Cleanup_Regex = qr/^(?:uninit|cleanup)$/io;
my $Config_Regex = qr/^(?:init|setup|cleanup|uninit)$/io;
# Recognized TESTCASES command-line switches.
my $Options_Regex = qr/^-(rsc|ns|nc|st|cl|nsc|q|h|ndpre|ndpos|l|pcf)$/io;
# Numeric status values counted as errors; mirrors the constants above.
my $All_Err_Regex = qr/0|-[5678]/o;#/$TCD::FAIL|$TCD::FATAL|$TCD::SCRIPT|$TCD::CONFIG|$TCD::NA/
my $Fail_Regex = qr/0|-5/o;#/$TCD::FAIL|$TCD::FATAL/
# Sentinel inserted into the test list where a user post-setup thpl runs.
my $nate_postsetup_label = ':NATE_POST_SETUP:';

# Make sure any open testcase record is closed at process exit, and that a
# forked child does not inherit (and later close) the parent's handle.
call_on_exit_early_add({callback => \&_testcase_end,});
call_on_fork_add({callback => \&_clear_testcase_handle,});

# Close the currently open testcase record, if any. Skipped during rerun
# attempts. Invoked both directly and as an exit-early callback.
sub _testcase_end {
    my $self = shift;
    return if ($self->{current_rerun_attempt});
    if ($TestCase_Handle) {
        Tharn::testcase_end(handle => $TestCase_Handle);
        $TestCase_Handle=undef;
    }
}

# Fork callback: child must forget the parent's testcase handle.
sub _clear_testcase_handle {$TestCase_Handle=undef;}

# Exportable convenience entry point: build a TCD object from the supplied
# (name => description) pairs, run it, and exit(0) on success. Sets the
# package-main global $Test as a side effect. Returns $TCD::FAIL on error.
sub testcases {
    my @TESTS=@_;
    package main;
    use vars qw($Test);
    $Test = new TCD(-testcases=>[@TESTS]);
    if ($Test->error) {
        $Test->log($Test->errmsg);
        return $TCD::FAIL;
    }
    # Performs method callback
    $Test->run_test();
    if ($Test->error) {
        $Test->log($Test->errmsg);
        return $TCD::FAIL;
    }
    exit(0);
}

###############################################################################
# Method:    new
# Objective: Create a new TCD object.
#            Accepts -testcases => [name => desc, ...] (ordered pairs),
#            optional -options => 'comma,separated,switches' and optional
#            -testcase_to_qc_name => {name => qc_name}. On any failure the
#            (blessed) object is still returned with its error state set;
#            callers must check $self->error.
# Details:   See POD documentation at the end of this file.
###############################################################################
sub new {
    my ($class,%args) = @_;
    my (
        $config,
        $fullpath,
        $self,
        @argv,
        @methodlist,
        %testcases,
    );
    $self = {};
    bless $self;   # NOTE(review): one-arg bless ignores $class; subclassing would not work — presumably intentional here
    no strict 'refs';
    # Prefer ntest's pre-opened stdout/stderr handles when they exist;
    # otherwise fall back to empty IO::Tee handles (output discarded).
    $self->{stdout}=(defined(fileno(\*ntest::NTEST_STDOUT)))
        ? \*ntest::NTEST_STDOUT
        : new IO::Tee();
    $self->{stderr}=(defined(fileno(\*ntest::NTEST_STDERR)))
        ? \*ntest::NTEST_STDERR
        : new IO::Tee();
    $self->{log} = $self->{stdout};
    # Enable autoflush on the log handle without disturbing the currently
    # selected default handle.
    select((select($self->{log}), $| = 1)[0]);
    use strict 'refs';

    # Retrieve Test Case arguments
    # from command line.
    my $tc_param=$self->get_param('TESTCASES') || $self->get_param('ARGS');
    # Default to running everything when no TESTCASES/ARGS was supplied.
    @argv =($tc_param) ? split('\s+',($tc_param)) : 'all';
    if (defined($args{-options})) {
        my $optstr = $args{-options};
        $optstr=~ s/\s+//g; # strip any whitespace
        my @optionlist=split(/\,/,$optstr);
        push @argv,(@optionlist);
    }
    $self->{argv}=\@argv;
    unless (scalar(@argv)) {
        $self->_set_err($Border,
            "Error: No test parameters of options were specified for the\n".
            "       thpl parameter 'TESTCASES' on the command line.",
            $Border);
        NATE::Result::Config->new("Invalid command line arguments",abortive=>0)->log();
        return $self;
    }

    # Check to see that we were given either a 'tests' or a 'testcases'
    # parameter.
    # %testcases preserves the caller's declaration order via Tie::IxHash.
    no strict;
    tie %testcases,Tie::IxHash;
    use strict;
    my $cnt=0;
    # -testcases is a flat (name, description, name, description, ...) list.
    push my @testcases,@{$args{-testcases}};
    while ($cnt < scalar(@testcases)){
        $testcases{$testcases[$cnt]}=$testcases[$cnt+1];
        push @methodlist, $testcases[$cnt];
        $cnt+=2;
    }
    $self->{testcases} =\%testcases;
    $self->{methodlist}=\@methodlist;
    $self->{testcase_to_qc_name}={};
    if (defined($args{-testcase_to_qc_name})) {
        $self->{testcase_to_qc_name} = $args{-testcase_to_qc_name};
    }

    # Make sure we were given at least one test
    unless (scalar(@{$self->{methodlist}})>0) {
        $self->_set_err('Error: TCD::new either parameter "tests" ',
            'or "testcases" must be specified');
        return $self;
    }

    # Validate input argument list to thpl file.
    $self->_args_list($self->{methodlist});
    if ($self->error) { return $self }

    # Test Name
    $fullpath=$self->get_param('TESTNAME');
    if ($self->error) { return $self }

    # Testcase information
    # XXX: Doesn't NATE already have this broken apart somewhere?
    # Project, testcase path, and testcase name are located
    # after the 'test' directory name.
    # '.../test/[project]/[tcpath]/[tesTCD]'
    my($testname, $dir) = fileparse($fullpath,'.thpl');
    my $tcpath = $dir;
    $self->{testbed} = getcwd();
    # Strip the testbed prefix and any leading/trailing path separators to
    # leave the testcase path relative to the testbed.
    $tcpath =~ s/^$self->{testbed}//;
    $tcpath =~ s/(\\|\/)$//;
    $tcpath =~ s/^(\\|\/)//;
    $self->{tcpath} = $tcpath;
    $self->{testscript_dir} = $dir;
    $self->{testscript} = $testname;

    # Quiet mode: route stdout and log to empty tees (output discarded).
    if ($self->_is_quiet()){
        $self->{stdout} = IO::Tee->new();
        $self->{log} = IO::Tee->new();
        select((select($self->{log}), $| = 1)[0]);
    }

    # Display testcase name being executed
    $self->display('','Test Script: '.$testname.'.thpl');

    # Store standard methods in global array.
    @StdMethods=('init','setup','cleanup','uninit');

    $self->nlog('TESTBED: '.$self->{testbed});

    # Retrieve variables from config file
    $config=$self->get_param('CONFIG');
    my $param = NATE::ParamSet::param_global();

    # Retrieve rerun attempts value.
    $self->{max_rerun_attempts} = $self->get_param('MAX_RERUN_ATTEMPTS');
    # Unset so child processes do not re-trigger rerun handling themselves.
    $param->unset('MAX_RERUN_ATTEMPTS');

    # Set the value for TCD_GATHER_DIAGNOSTICS as either the default value 0
    # or the value that was passed in from command line
    my $gather_diagnostics = $param->get( 'TCD_GATHER_DIAGNOSTICS', default => 0 );
    $self->gather_diagnostics( is_allowed => $gather_diagnostics );

    $self->_clear_err() if(defined $config);
    if (defined $config) {
        $self->read_config($config);
        return $self if($self->error);
    }
    # next/previous testcase names are maintained by execute_test().
    $self->{next}     = '';
    $self->{previous} = '';
    return $self;
}

###############################################################################
# Method:    description
# Objective: Display test case header
#            Logs a banner (script name, testcase name, description lines)
#            to both the NATE log and the TCD log. With no arguments the
#            description is pulled from _get_nuffo_description().
# Details:   See POD documentation at the end of this file.
###############################################################################
sub description {
    my ($self, @name)=@_;
    my(
        @header,
        $bFirst,
        $newline,
        $type,
    );

    # initialize
    $bFirst=1;

    # hold a copy of description in tcase to use for logging.
    $self->{description}="@name";

    unless (@name) {
        if (ref($self->_get_nuffo_description())eq 'ARRAY') {
            push @name,@{$self->_get_nuffo_description()};
        }
        else {
            push @name,$self->_get_nuffo_description();
        }
    }

    # Config methods (init/setup/cleanup/uninit) get a different banner tag.
    $type = ($self->{testname} =~ $Config_Regex) ? 'TEST CONFIG' : 'TEST CASE ';
    push @header,
        "\n",
        '#' x 80,
        '## TEST SCRIPT: '.$self->{testscript}.'.thpl',
        '## '.$type.': '.$self->{testname}.'()',
        '## ' . _get_link_to_script();
    # First description line gets the 'DESCRIPTION:' label; the rest are
    # continuation lines.
    foreach my $line (@name[0..scalar(@name)-1]){
        $newline=($bFirst==1) ? '## DESCRIPTION: '.$line : '## '.$line;
        push @header,$newline;
        $bFirst=0;
    }
    push @header, '#' x 80;
    $Log->comment(join ("\n", @header));
    $self->log(@header);
}

###############################################################################
# Method:    display
# Objective: print output to stdout
# Details:   See POD documentation at the end of this file.
###############################################################################
sub display {
    my $self = shift;
    # Join all arguments with newlines and write to the stdout handle,
    # flushing immediately so output interleaves correctly with child output.
    print {$self->{stdout}} join("\n",@_)."\n";
    $self->{stdout}->flush();
}

###############################################################################
# Method:    get_dir
# Objective: get testcase directory path
#            Returns the directory of the running test script, or '' (with
#            the object error state set) when it was never recorded.
# Details:   See POD documentation at the end of this file.
###############################################################################
sub get_dir {
    my $self = shift;
    $self->_clear_err();
    if (defined $self->{testscript_dir}) {
        return $self->{testscript_dir};
    }
    $self->_set_err('Error: TCD::get_dir() TCD object is missing testcase directory');
    return '';
}

###############################################################################
# Method:    get_loghdl
# Objective: Return a reference to the log handle used to log output
# Details:   See POD documentation at the end of this file.
###############################################################################
sub get_loghdl {
    my $self = shift;
    return \$self->{log};
}

################################################################################
# Subroutine Name: get_name
# Objective:       Returns script name (basename of the thpl, no extension),
#                  or '' with the error state set when unavailable.
# Details:         See POD documentation at the end of this file
################################################################################
sub get_name {
    my $self = shift;
    $self->_clear_err();
    if (defined $self->{testscript}){
        return $self->{testscript};
    }
    $self->_set_err('Error: TCD::get_name() Test case name is not set');
    return '';
}

################################################################################
# Subroutine Name: get_testcase
#                  get_next_testcase
#                  get_previous_testcase
# Objective:       Return the current / next / previous test case name as
#                  tracked by execute_test(). Each returns '' with the error
#                  state set when the value is not available.
# Details:         See POD documentation at the end of this file
################################################################################
sub get_testcase {
    my $self = shift;
    $self->_clear_err();
    if (defined $self->{testname}) {
        return $self->{testname};
    }
    $self->_set_err('Error: TCD::get_testcase() Test case name is not set');
    return '';
}

sub get_next_testcase {
    my $self = shift;
    $self->_clear_err();
    if (defined $self->{next}) {
        return $self->{next};
    }
    $self->_set_err('Error: TCD::get_next_testcase() Next test case name is not set');
    return '';
}

sub get_previous_testcase {
    my $self = shift;
    $self->_clear_err();
    if (defined $self->{previous}){
        return $self->{previous};
    }
    # Fixed copy-paste from get_next_testcase: message said "Next".
    $self->_set_err('Error: TCD::get_previous_testcase() Previous test case name is not set');
    return '';
}

################################################################################
# Subroutine Name: get_previous_result()
# Objective:       Return the status of the most recently recorded result,
#                  or undef with the error state set when none exist yet.
# Details:         See POD documentation at the end of this file
################################################################################
sub get_previous_result {
    my $self = shift;
    $self->_clear_err();
    my $result = $self->{result};
    if ($result){
        return $result->[-1]->{status}; # note array[-1] is last element
    }
    $self->_set_err('Error: TCD::get_previous_result() No results found');
    return;
}

################################################################################
# Subroutine Name: get_worst_result()
# Objective:       Return the worst (numerically lowest) status among all
#                  recorded testcase results; $TCD::PASS when none recorded.
# Details:         See POD documentation at the end of this file
################################################################################
sub get_worst_result {
    my $self = shift;
    $self->_clear_err();
    my $worst_result = $TCD::PASS;
    my $results = $self->{result};
    foreach my $result (@{$results}) {
        # if this result is worse than the worst result
        # update the worst result
        if ($result->{status} < $worst_result) {
            $worst_result = $result->{status};
        }
    }
    # set an error if there is no worst result
    # (defensive only: $worst_result is always initialized above).
    if (not defined($worst_result)) {
        $self->_set_err('Could not determine worst result');
        return '';
    }
    return $worst_result;
}

###############################################################################
# Method:    get_param
# Objective: get parameter value or use default if not set.
#            In scalar context returns the raw value. In list context the
#            value is split on ':', '|' or ','; if the value ends in '.cfg'
#            it is treated as a file whose non-blank, non-comment lines are
#            the values.
# Details:   See POD documentation at the end of this file.
###############################################################################
sub get_param # XXX: there should be a faster way
{
    my ($self, $param, $default) = @_;
    $self->_clear_err();
    # NOTE: 'my VAR = ... if COND' has undefined behavior in Perl
    # (see perlsyn); declare first, assign conditionally.
    my %opts;
    %opts = (-default=>$default) if (defined($default));
    my $value = Tharn::param($param, %opts);

    # If called in list context, split values and return array.
    if (wantarray) {
        # Some parameters (FILER, etc.) allow multiple values to
        # be separated by either ',' or ':'.
        # If the value ends in '.cfg', the caller is requesting the
        # values to be read from a config file, rather than specifying
        # them all on the command line. This makes specifying a large
        # number of clients much easier.
        my @value;
        return @value unless defined $value;   # no value and no default: empty list
        if ($value =~ /\.cfg$/) {
            # Three-arg open with a lexical handle (was a bareword 2-arg
            # open that was never closed).
            open(my $cfg_fh, '<', $value) || do {
                $self->_set_err("Can't open config file $value: $!");
                return;
            };
            while (my $line = <$cfg_fh>) {
                chomp $line;
                # Skip blank or commented out lines.
                next if ($line =~ /^\#/ or $line =~ /^\s*$/);
                push @value, $line;
            }
            close($cfg_fh);
        }
        else {
            # NOTE(review): the class also splits on literal '|' — presumably
            # intentional, kept as-is.
            @value = split(/[:|,]/, $value);
        }
        return @value;
    }
    return $value;
}

###############################################################################
# Method:    gather_diagnostics
# Objective: external interface for is_gather_diagnostics_allowed
# Details:   See POD documentation at the end of this file.
###############################################################################
sub gather_diagnostics {
    my ($self, @args) = @_;
    # With no arguments this is a getter for the current flag; with
    # is_allowed => BOOL it is a setter (validated by Params::Validate).
    # If there are no args to gather_diagnostics,
    # then return TCD_GATHER_DIAGNOSTICS
    if (!@args) {
        return $self->{is_gather_diagnostics_allowed};
    }
    # If there are args, then modify is_gather_diagnostics_allowed
    else {
        my %opts = validate_with(
            params => \@args,
            spec => { is_allowed => { type => BOOLEAN } },
            allow_extra => 0
        );
        $self->{is_gather_diagnostics_allowed} = $opts{is_allowed};
    }
}

# Redirect the TCD log to the null device, silencing log()/nlog() output
# until enable_stdout() is called.
sub disable_stdout {
    my $self = shift;
    require File::Spec;
    # Three-arg open on a lexical handle with an error check (was an
    # unchecked 2-arg open on the bareword global NULL).
    open(my $null_fh, '>', File::Spec->devnull)
        or return;   # can't open the null device: leave logging untouched
    $self->{log} = $null_fh;
    select((select($self->{log}), $| = 1)[0]);
}

# Restore the TCD log to the original stdout handle.
sub enable_stdout {
    my $self = shift;
    $self->{log} = $self->{stdout};
    select((select($self->{log}), $| = 1)[0]);
}

###############################################################################
# Method:    log
# Objective: Log test output to stdout and file
# Details:   See POD documentation at the end of this file.
###############################################################################
sub log {
    my $self = shift;
    # Arguments are joined with newlines; flushed immediately.
    print {$self->{log}} join("\n", @_) . "\n";
    $self->{log}->flush();
}

###############################################################################
# Method:    ndisplay
# Objective: print output to stdout and nate logs
# Details:   See POD documentation at the end of this file.
###############################################################################
sub ndisplay {
    my $self = shift;
    if (@_) {
        my $data = join ("\n", @_);
        $Log->comment($data);
        $self->display($data);
    }
}

###############################################################################
# Method:    nlog
# Objective: Log test output to stdout, tcase, and nate logs
# Details:   See POD documentation at the end of this file.
###############################################################################
sub nlog {
    my $self = shift;
    # Mirror the message to the NATE log and the TCD log.
    if (@_) {
        my $data = join ("\n", @_);
        $Log->comment($data);
        $self->log($data);
    }
}

###############################################################################
# Method:    read_config ()
# Objective: read config file for environment variables.
#            Lines of the form PARAM=value (optionally quoted) are loaded
#            into the global NATE parameter set as defaults.
# Details:   See POD documentation at the end of this file.
###############################################################################
sub read_config {
    my ($self,$config)=@_;

    # Clear the error buffer
    $self->_clear_err();

    # Validate that a config file path was passed in.
    unless (defined $config and length $config){
        $self->_set_err("'config' is a required argument");
        return undef;
    }

    # Append '.cfg' file extension if not provided
    $config .= '.cfg' if ($config !~ /\.cfg$/);

    # Determine path of config file. Unless config path starts
    # with '.', '~', or '/' the path is relative to the directory
    # location of the test thpl being executed.
    $config = ($config =~ /^[.~\/]/) ? abs_path($config)
                                     : $self->get_dir().$config;

    # Display location of config file
    $self->nlog('CONFIG: '.$config);

    # Return error if the file can't be opened.
    # (Three-arg open on a lexical handle; was a bareword 2-arg open.)
    my $cfg_fh;
    unless (open($cfg_fh, '<', $config)) {
        $self->_set_err("Unable to open config file '".$config."'",$!);
        return $TCD::FALSE;
    }

    # Process the config file parameter value pairs
    while (my $line = <$cfg_fh>) # XXX: I wonder if NATE::Util has something to do this.
    {
        chomp($line = strip_comments($line));
        # Skip any line not containing the '=' character.
        next if ($line!~/\=/);
        # If value is enclosed within quotes ('|"), remove quotes.
        my ($param,$value)=$line=~/(\S+)=['"]?((\S)[^'"]+|\d+)/;
        # NOTE(review): get() with default appears to register $value as the
        # parameter's default without overriding a command-line value —
        # confirm against NATE::ParamSet.
        NATE::ParamSet::param_global()->get($param, default => $value);
    }

    # Close the config file
    close($cfg_fh);
}

# One-time preprocessing before any testcase runs: handles -l/-h, validates
# -st usage, records option flags (bSetup, bCleanup, ...), and expands
# $self->{tests} into the full ordered schedule including init/setup/
# cleanup/uninit. Returns $TCD::TRUE to proceed, $TCD::FALSE to stop.
sub _perform_one_time_activities{
    my ($self, %opts) = @_;
    my ($bFound, $test, $testname);
    my $p = NATE::ParamSet::param_global();
    # Prevent child processes from re-parsing our TESTCASES/ARGS.
    $p->set(key => 'TESTCASES', value => undef);
    $p->set(key => 'ARGS', value => undef);
    $bFound=0;

    # Display list of available tests and return
    # if the list (-l) option is specified
    if (scalar(grep{/^-l$/} @{$self->{options}})) {
        $Log->trace('Returning Test List') if $Log->may_trace();
        $self->_list_tests($self->{methodlist});
        return $TCD::FALSE;
    }
    $Log->trace('Not returning Test List') if $Log->may_trace();

    # if the help (-h) option is specified
    if (scalar(grep{/^-h/} @{$self->{options}})) {
        $Log->trace('Returning help') if $Log->may_trace();
        $self->_help();
        return $TCD::FALSE;
    }
    $Log->trace('Not Returning help');

    # Make sure if -st is explicitly called that
    # no other test case arguments accompany it.
    if (scalar(grep{/^-st$/} @{$self->{options}}) and
        (scalar (@{$self->{tests}}) or scalar (@{$self->{options}})>1)) {
        $Log->trace('Invalid command line arguments found with -st')
            if $Log->may_trace();
        $self->_set_err($Border,
            'Error: TCD::run_test() When specifiying the setup ',
            'switch (-st) explicitly, no other command options or ',
            'tests can accompany it. Only TESTCASES=-st is allowed',
            $Border,
        );
        NATE::Result::Config->new('Invalid command line arguments',abortive=>0)->log();
        return $TCD::FALSE;
    }
    $Log->trace('No invalid command line arguments found with -st')
        if $Log->may_trace();

    unless (scalar (@{$self->{tests}})) {
        $self->{NOLOG}=1;
    }

    # Load postprocess parameter supplied by user.
    $self->_determine_postsetup();

    # Determine if setup/cleanup should be run once
    # per Test Case or once per Test Case. The
    # default is to run setup/cleanup once per
    # Test Case.
    foreach my $opt (@{$self->{options}}) {
        next if($opt !~ /^-/);
        if($opt eq '-rsc')      { $self->{bRepeatSetup} = 1; }
        elsif($opt eq '-st')    { $self->{bSetup} = 1; }
        elsif($opt eq '-cl')    { $self->{bCleanup} = 1; }
        elsif($opt eq '-nc')    { $self->{bNoCleanup} = 1; }
        elsif($opt eq '-ns')    { $self->{bNoSetup} = 1; }
        elsif($opt eq '-nsc')   { $self->{bNoSetClean} = 1; }
        elsif($opt eq '-q')     { $self->{bQuiet} = 1; }
        elsif($opt eq '-ndpre') { $self->{bNoPre} = 1; }
        elsif($opt eq '-ndpos') { $self->{bNoPost} = 1; }
        elsif($opt eq '-pcf')   { $self->{bPropagateConfigFailure} = 1; }
    }

    if ($self->{bCleanup}) {
        $Log->trace('Cleanup Requested') if $Log->may_trace();
        # If cleanup requested execute at the beginning of
        # test. Only perform again if 1 or more tests are requested
        # to be executed.
        my @tmp=(defined($self->{tests}) && scalar(@{$self->{tests}}))
            ? ('init','cleanup',$self->_setup_fnames(),@{$self->{tests}},'cleanup','uninit')
            : ('init','cleanup','uninit');
        $self->{tests}=\@tmp;
    }
    elsif ($self->{bSetup}) {
        # If setup requested don't execute tests just setup code.
        # This is because setup is automatically executed whenever a test
        # is requested to be run.
        $Log->trace('Setup and do not execute tests requested')
            if $Log->may_trace();
        my @tmp=('init',$self->_setup_fnames(),'uninit');
        $self->{tests}=\@tmp;
    }
    elsif ($self->{bNoSetup}) {
        $Log->trace('Bypass Setup Requested') if $Log->may_trace();
        # Determine if user wants to bypass setup
        my @tmp=('init',@{$self->{tests}},'cleanup','uninit');
        $self->{tests}=\@tmp;
    }
    elsif ($self->{bNoCleanup}) {
        # Determine if user wants to bypass cleanup
        $Log->trace('Bypass Cleanup Requested') if $Log->may_trace();
        my @tmp=('init',$self->_setup_fnames(),@{$self->{tests}},'uninit');
        $self->{tests}=\@tmp;
    }
    elsif ($self->{bNoSetClean}) {
        # Determine if user wants to bypass setup and cleanup
        $Log->trace('Bypass Setup and Cleanup Requested') if $Log->may_trace();
        my @tmp=('init',@{$self->{tests}},'uninit');
        $self->{tests}=\@tmp;
    }
    elsif ($self->{bRepeatSetup}){
        # Run setup and cleanup for each requested test
        # to be executed.
        $Log->trace('Setup and Cleanup will be run between each test')
            if $Log->may_trace();
        my @tmp;
        foreach my $test (@{$self->{tests}}){
            # Not running setup and cleanup for disabled test cases
            if ($test =~ /^\-/) {
                push @tmp,$test;
            }
            else {
                push @tmp,$self->_setup_fnames(),$test,'cleanup';
            }
        }
        @tmp=('init',@tmp,'uninit');
        $self->{tests}=\@tmp;
    }
    else{
        # Run setup once before running all selected
        # tests followed by running cleanup at the end.
        $Log->trace('Setup and cleanup will be run once') if $Log->may_trace();
        my @tmp=('init',$self->_setup_fnames(),@{$self->{tests}},'cleanup','uninit');
        $self->{tests}=\@tmp;
    }

    # Check that at least one valid test was supplied for
    # options that require it.
    if ($self->{bNoSetup}||$self->{bNoCleanup}||$self->{bNoSetClean}||$self->{bQuiet}) {
        $Log->trace('Found arguments that apply to tests') if $Log->may_trace();
        foreach $test (@{$self->{tests}}) {
            ($testname)=grep{/^(\-|)$test$/i} @{$self->{methodlist}};
            next if (not defined($testname));
            $bFound=1;
        }
        if (!$bFound) {
            # No valid test names: fall back to running everything.
            $Log->trace('No valid test names supplied with selected option'.
                "\nDefaulting to 'all'") if $Log->may_trace();
            my @tmp = ('init');
            push @tmp, 'setup' if ! ($self->{bNoSetClean} || $self->{bNoSetup});
            push @tmp, @{$self->{methodlist}};
            push @tmp, 'cleanup' if ! ($self->{bNoSetClean} || $self->{bNoCleanup});
            push @tmp, 'uninit';
            $self->{tests} = \@tmp;
        }
    }
    else {
        $Log->trace('No arguments were found that must be applied to tests')
            if $Log->may_trace();
    }
    return $TCD::TRUE;
}

# Core dispatch loop: walk $self->{tests} (already expanded by
# _perform_one_time_activities), run each method defined in package main,
# collect per-test result hashes into $self->{result}, and apply the
# failure-propagation / abort policies. Prints the run summary at the end.
sub execute_test{
    my $self = shift;
    my (
        $idx,
        $test,
        $testname,
        @result,
        @missing,
        $pp_proc
    );
    $self->{result}=\@result;

    # Temporary variable to hold array. Avoids function call _setup_fnames
    # with every turn of the following loop.
    my @local_array = ('init',$self->_setup_fnames(),@{$self->{methodlist}},'cleanup','uninit');
    foreach $test (@{$self->{tests}}) {
        if (not grep{/^(\-|)$test$/i} @local_array) {
            $self->_set_err($Border,'Error: TCD::run_test(): '.
                'No valid test named "'.$test.'" found!!!', $Border);
            # Log result and add it to the results list.
            NATE::Result::Config->new('Invalid test name: '.$test,abortive=>0)->log();
            return $TCD::FALSE;
        }
    }

    #
    # If LISTTESTCASES has been set, all we are going to do is print
    # testcases and return.
    #
    if ($self->get_param('LISTTESTCASES') =~ /1|^y|^t/i) { # XXX: Booooo!
        $Log->trace('LISTTESTCASES is set') if $Log->may_trace();
        foreach $test (@{$self->{tests}}){
            # skip disabled and standard test cases
            next if ($test =~/^\-/);
            next if ($test =~ $Config_Regex);
            next if ($test =~ m/^$nate_postsetup_label$/);
            my $description = ${$self->{testcases}}{$test};
            $self->display("testcase : $test : $description");
        }
        return;
    }

    # Execute requested Test Cases
    if($Log->may_debug()) {
        require Data::Dumper;
        $Log->debug("Entering testcase loop\nTestcases :".Data::Dumper::Dumper(\@{$self->{tests}}));
    }
    # Start at the first scheduled method (was left undef, which numified
    # to 0 with an uninitialized-value warning).
    $idx = 0;
    while ($idx <= $#{$self->{tests}}) {
        $test=${$self->{tests}}[$idx];
        $Log->debug('Processing test: '.$test) if $Log->may_debug;
        # Maintain previous/next testcase names for the accessors.
        if ($idx>0) {
            $self->{previous} = ${$self->{tests}}[$idx - 1];
        }
        else {
            $self->{previous} = '';
        }
        # NOTE(review): on the last element this reads one past the end and
        # sets {next} to undef — looks like '$idx < $#...' was intended;
        # kept as-is since get_next_testcase() handles undef.
        if ($idx < scalar(@{$self->{tests}})) {
            $self->{next} = ${$self->{tests}}[$idx + 1];
        }
        else {
            $self->{next} = '';
        }
        $idx++;
        $self->{step_nbr}=0;
        $self->{method}=$test;
        # Create a hash to store each tests results
        my $hRes = {};
        $self->{test_nbr}++;

        # Specified testnames are case independent (test1==TeSt1).
        # Check if this is the case.
        ($testname)=grep{/^(\-|)$test$/i} @local_array;
        if (not defined($testname)) {
            $Log->trace("Test $test is not defined - skipping") if $Log->may_trace();
            next;
        };

        # If testname is valid execute it and store its results in
        # a hash, otherwise inform the user it doesn't exist.
        package main; #XXX: Shitty context switching requires us to use fully qualified variables, like $Tharn::Log

        # Check to see if test has been disabled (leading '-').
        if ($testname=~/^\-/) {
            $Tharn::Log->trace("Test $testname is disabled - skipping")
                if $Tharn::Log->may_trace();
            package TCD; # XXX: Is this really needed?
            $self->{description}='**Unable to capture description due to test being disabled**';
            $hRes={
                test     => substr($testname,1),
                type     => 'tst',
                status   => $TCD::DISABLE,
                start_dt => strftime("%Y-%m-%d",localtime()),
                start_tm => strftime("%H:%M:%S",localtime()),
                time     => 0,
            };
            push(@result,$hRes);
        }
        # Deal with nate post process - A file suppplied by user.
        # Expected: A thpl.
        elsif ($testname =~ m/$nate_postsetup_label/) {
            # Re-load File::BaseName as the following code is being run in
            # the main package context, not TCD context. See 20 odd lines above.
            use File::Basename qw(fileparse);
            # Since the postprocess label is seen, time to get location of
            # postprocess from where it is actually stored.
            my $pprocess_thpl = $self->{nate_postsetup};
            $Tharn::Log->trace("$pprocess_thpl") if $Log->may_trace();
            my ($pprocess_name, $pp_dir) = fileparse($pprocess_thpl, '.thpl');
            $pp_proc = NATE::Process->new(
                codespec   => $pprocess_thpl,
                runid      => "nate_post_process_" . $pprocess_name,
                background => 0, # Wait for it to complete.
            );
            if ($pp_proc) {
                $pp_proc->start();
                $pp_proc->wait();
                $pp_proc->destroy();
            }
            $Tharn::Log->trace("NATE POST TEST $testname started with runid "
                . $pp_proc->runid()) if $Tharn::Log->may_trace();
            next;
        }
        # Check to see if test has been defined
        elsif (defined(&{*main::.$testname})) {# XXX: That dot looks wrong
            $Tharn::Log->trace("Test $testname exists - executing")
                if $Tharn::Log->may_trace();
            my $not_config = ($testname !~ $Config_Regex);
            # Optional user-supplied pre-testcase diagnostic hook.
            if ( !$self->{bNoPre} && $not_config ) {
                my $diag_pre_routine;
                if ( defined( &{*main::.'diag_pre_testcase'} )) {
                    $diag_pre_routine = 'diag_pre_testcase';
                }
                if ( $diag_pre_routine ) {
                    package main;
                    eval {
                        $Tharn::Log->comment('----------------------------------------'.
                            "\nINFO: Calling Pre-Testcase main::$diag_pre_routine\n".
                            '----------------------------------------');
                        no strict 'refs';
                        &{*main::.$diag_pre_routine}($testname);
                    };
                    my $err = $@;
                    if ( $err ) {
                        $Tharn::Log->warn("Pre-Routine $diag_pre_routine failed with $err");
                    }
                }
            }
            package TCD;
            # Run the actual testcase method.
            $hRes=$self->_run_method($testname);
            # Store each test result hash into an array.
            push(@result,$hRes);
            # Optional user-supplied post-testcase diagnostic hook (gets the
            # testcase status as its second argument).
            if ( !$self->{bNoPost} && $not_config ) {
                my $diag_post_routine;
                if ( defined( &{*main::.'diag_post_testcase'})) {
                    $diag_post_routine = 'diag_post_testcase';
                }
                if ( $diag_post_routine ) {
                    package main;
                    eval {
                        $Tharn::Log->comment('----------------------------------------'.
                            "\nINFO: Calling Post-Testcase main::$diag_post_routine\n".
                            '----------------------------------------');
                        no strict 'refs';
                        &{*main::.$diag_post_routine}($testname,$hRes->{status});
                    };
                    my $err = $@;
                    if ( $err ) {
                        $Tharn::Log->warn("Post-Routine $diag_post_routine failed with $err");
                    }
                }
            }
            # TCD_ABORT_SEVERITY
            # if user sets this param and $hRes->{status} matches it then
            # switch status value to FATAL for TCD to exit out as in FATAL
            # supported values map directly to ntest --abortsev
            #
            if ( $self->get_param('TCD_ABORT_SEVERITY') and $hRes->{status} < $TCD::PASS ) {
                my $user_abort = $self->get_param('TCD_ABORT_SEVERITY');
                my $_chg = '';
                $Log->comment("TCD_ABORT_SEVERITY set: [$user_abort ]status: $hRes->{status}");
                # FAIL->FATAL
                if ( $user_abort =~ /FAIL/i and $hRes->{status} == $TCD::FAIL ) {
                    $_chg = 'FAIL';
                }
                # NA->FATAL
                elsif ( $user_abort =~ /NA/i and $hRes->{status} == $TCD::NA ) {
                    $_chg = 'NA';
                }
                # CONFIG->FATAL
                elsif ( $user_abort =~ /CONFIG/i and $hRes->{status} == $TCD::CONFIG ) {
                    $_chg = 'CONFIG';
                }
                # SCRIPT->FATAL
                elsif ( $user_abort =~ /SCRIPT/i and $hRes->{status} == $TCD::SCRIPT ) {
                    $_chg = 'SCRIPT';
                }
                # WARN-> FATAL
                elsif ( $user_abort =~ /WARN/i and $hRes->{status} == $TCD::WARN ) {
                    $_chg = 'WARN';
                }
                # All non passing, (shortcut)
                elsif ( $user_abort eq 'ANY' and
                        ( $hRes->{status} < 1 and $hRes->{status} > -9) ) {
                    $_chg = 'ANY_FAILURE';
                }
                # no change
                else {
                    $Log->comment("TCD_ABORT_SEVERITY [$user_abort] "
                        ."not match for switch to TCD::FATAL" );
                }
                # comment on change
                if ( $_chg ) {
                    $Log->comment("TCD_ABORT_SEVERITY changed TCD::$_chg $hRes->{status} to TCD::FATAL");
                    $hRes->{status} = $TCD::FATAL;
                }
            }
            # End all testing if fatal error occurs.
            if ($hRes->{status} == $TCD::FATAL) {
                Tharn::tstlog(-comment=>'FATAL ERROR on '.$testname.'()');
                $self->{msg} = 'ERROR FATAL: Testing aborted due to fatal return from '.$testname.'()';
                # Set the index to the next to last element. Check to see
                # if it's cleanup. If it is, then leave it and execute
                # cleanup and uninit. If it's not, set the index to the last
                # element (uninit).
                if ((($testname ne 'cleanup') and ($testname ne 'uninit'))) {
                    $idx = scalar(@{$self->{tests}}) - 2;
                    if (($self->{tests}->[$idx] ne 'cleanup') or ($testname eq 'init')) {
                        # Go to last test in list, which should be uninit
                        $idx++;
                    }
                }
                next;
            }
        }
        # Inform user that no method stub exists for
        # the test case
        else {
            package TCD;
            if (not scalar(grep{$_ =~ $test} @StdMethods)){
                $self->display("Test '$test' not found\n");
                push @missing, $test;
            }
            next;
        }
        if(scalar(@missing)) {
            die NATE::Result::Config->new("Tests @missing could not be found");
        }

        # If init() or Single setup() and either method Fails mark all the
        # testcass as FAIL and straightaway jump to execution of cleaup/uninit
        if ($hRes->{test} =~ $Setup_Regex && $hRes->{status} =~ $All_Err_Regex &&
            (! $self->{bRepeatSetup} || $hRes->{test} eq 'init')) {
            $self->{msg} = 'ERROR: Testing aborted due to error from '.$testname.'()';
            $idx++ if $self->{tests}->[$idx] eq 'setup'; #move on as we need to go to user testcase
            my $tc_start_id = $idx;
            # Set the index to the next to last element. Check to see
            # if it's cleanup. If it is, then leave it and execute
            # cleanup and uninit. If it's not, set the index to the last
            # element (uninit).
            $idx = scalar(@{$self->{tests}}) - 2;
            if (($self->{tests}->[$idx] ne 'cleanup') or ($testname eq 'init')) {
                # Go to last test in list, which should be uninit
                $idx++;
            }
            # Create testcase entry in database for all the tests and mark them fail
            if ( $self->{bPropagateConfigFailure} ) {
                for(my $i = $tc_start_id; $i < $idx; $i++) {
                    my $test = $self->{tests}->[$i];
                    next if $test =~ /setup|cleanup/;
                    my $description = '';
                    if (defined($self->{testcases}{$test})) {
                        $description = $self->{testcases}{$test};
                    }
                    my $tid = $self->_testcase_start($test, $description);
                    my $hRes = {
                        test   => $test,
                        status => $TCD::FAIL,
                        type   => 'tst',
                        id     => $tid,
                    };
                    push @result, $hRes;
                    NATE::Result::Fail->new("Testcase never executed: " . $self->{msg})->log();
                    $self->_testcase_end();
                }
                $self->{msg} .= "\nMarking all tests as FAIL.";
            }
        }
        # Else If Running Repeat setup() and it Fails mark the
        # next test (user testcase) as FAIL
        elsif ($hRes->{test} eq 'setup' && $hRes->{status} =~ $All_Err_Regex &&
               $self->{bRepeatSetup} ) {
            my $test = $self->{tests}->[$idx];
            # Create testcase entry in database for the next testcase and mark it fail
            if ( $self->{bPropagateConfigFailure} && $test !~ /$Config_Regex/ ) {
                $self->{msg} .= "ERROR: Marking '$test' as FAIL, due to error from $testname()\n";
                my $description = '';
                if (defined($self->{testcases}{$test})) {
                    $description = $self->{testcases}{$test};
                }
                my $tid = $self->_testcase_start($test, $description);
                my $res = {
                    test   => $test,
                    status => $TCD::FAIL,
                    type   => 'tst',
                    id     => $tid,
                };
                push @result, $res;
                NATE::Result::Fail->new("Testcase never executed: ERROR: Marking '$test' as FAIL, due to error from $testname()\n")->log();
                $self->_testcase_end();
            }
            $idx++;
        }
        # Else If cleanup() or uninit() and either method Fails mark the
        # previous test (user testcase) as FAIL
        # NOTE: in case of uninit() failure, cleanup will not be marked as FAIL,
        # rather the last user testcase (just before cleanup) will be marked FAIL
        elsif (($hRes->{test} eq 'cleanup' || $hRes->{test} eq 'uninit') &&
               $hRes->{status} =~ $All_Err_Regex) {
            my $test = $self->{tests}->[$idx - 2];
            # NOTE: 'my VAR = ... if COND' has undefined behavior in Perl
            # (see perlsyn); declare first, assign conditionally.
            my $current_config_test_result;
            $current_config_test_result = pop @result if $self->{bPropagateConfigFailure};
            my $cleanup_result = undef;
            if ( $test eq 'cleanup' ) {
                #move back as we need to go to user testcase
                $test = $self->{tests}->[$idx - 3];
                $cleanup_result = pop @result if $self->{bPropagateConfigFailure};
            }
            # Create testcase entry in database for the next testcase and mark it fail
            if ( $self->{bPropagateConfigFailure} && $test !~ /$Config_Regex/ ) {
                my $description = '';
                if (defined($self->{testcases}{$test})) {
                    $description = $self->{testcases}{$test};
                }
                my $hRes = {
                    test   => $test,
                    status => $TCD::FAIL,
                    type   => 'tst',
                };
                if (defined($self->{testcase_to_qc_name}->{$test})) {
                    $test = $self->{testcase_to_qc_name}->{$test};
                }
                my $cur_res = pop @result; # drop the previous result for this testcase to prevent showing two result for the same testcase in the summary
                my $test_id = $cur_res->{id}; # get the id of the testcase to be marked as FAIL
                if ( $test_id ) {
                    $self->{msg} .= "ERROR: Marking '$test($test_id)' as FAIL, due to error from $testname()\n";
                    push @result, $hRes;
                    push @result, $cleanup_result if $cleanup_result;
                    push @result, $current_config_test_result;
                    my $result = NATE::Result::Fail->new("ERROR: Marking '$test($test_id)' as FAIL, due to error from $testname()\n",
                        subtest_id => $NATE::Result::Subtest_Id);
                    if (!$self->{current_rerun_attempt}) {
                        my $test_db = NATE::Database::Test::testdb_global();
                        $test_db->register_result(subtest_id => $NATE::Result::Subtest_Id,
                            parent_id => $test_id, result => $result);
                        $test_db->db_commit;
                    }
                }
            }
        }
    }
    # Display overall summary of test results
    $self->_run_summary(\@result) if (scalar(@result)>0);
}

###############################################################################
# Method:    run_test
# Objective: run test methods from program
# Details:   See POD documentation at the end of this file.
###############################################################################
# run_test() is the public entry point invoked by testcases()/user scripts.
# It performs one-time setup, runs the selected tests, optionally reruns the
# whole selection on failure, and prunes a near-empty summary file.
sub run_test {
    my $self = shift;
    $self->_clear_err();

    # One-time activities (arg parsing, env setup). Anything other than
    # $TCD::TRUE is an early-exit status propagated to the caller.
    my $RetVal = $self->_perform_one_time_activities();
    return $RetVal if ($RetVal != $TCD::TRUE);

    $self->execute_test();

    # If any testcase failed and max_rerun_attempts is specified by user then execute all the testcases once more.
    # NOTE: {total_failure} is recomputed by _summary() after each pass, so the
    # loop stops as soon as a rerun comes back clean.
    while ($self->{total_failure} && (++$self->{current_rerun_attempt} <= $self->{max_rerun_attempts})) {
        $Log->comment("RERUN Attempt #".$self->{current_rerun_attempt});
        $self->_clear_err();
        $self->execute_test();
    }

    # Delete summary files that have minimal content.
    # Below piece of code is kind of a waste and can probably be removed.
    if (-e $self->{summary_file}) {
        if (stat($self->{summary_file})->size < 10) {
            unlink ($self->{summary_file}) || do{
                $self->_set_err('Error: TCD::run_test(): Unable to delete empty',
                                "file ".$self->{summary_file}. " : $!");
                # NOTE(review): this path returns the object itself while other
                # paths return a status code — callers comparing numerically may
                # misread this; verify intended contract.
                return $self;
            }
        }
    }
}

###############################################################################
# Method:    step
# Objective: Display formatted test step
# Details:   See POD documentation at the end of this file.
###############################################################################
# Logs a numbered "== STEP n:" banner. From the second step onward the banner
# also shows the elapsed time since the previous step() call.
sub step {
    my ($self,@step)=@_;
    my($bFirst,$header,@output,);

    # Nothing to log if no step text was supplied.
    if (not defined $step[0]) {
        return;
    }

    # initialize
    $bFirst = 1;
    $self->{step_nbr} += 1;

    # Add the appropriate string to the testcase header.
    # If we aren't processing the first step, show how much time the
    # previous step took.
    if ($self->{step_nbr} > 1) {
        my $current_step_time = time - $self->{step_time};
        my $time_string = $self->timer_format(sprintf("%12.2f", $current_step_time), 1);
        $header = '== STEP ' . $self->{step_nbr} . " (+$time_string):";
    } else {
        $header = '== STEP ' . $self->{step_nbr} . ':';
    }

    # Save the current time so we know how long the next step takes
    $self->{step_time} = time;

    push @output,"\n",$Border;

    # size of step number + semicolon — pads continuation lines so multi-line
    # step text aligns under the first line's text.
    my $border_prefix = '== '.' 'x(length($self->{step_nbr}) + 1);

    # Append a link back to the calling script line for traceability.
    push @step, _get_link_to_script();

    foreach my $line (@step[0..scalar(@step)-1]) {
        if(!$bFirst) {
            push(@output, $border_prefix.$line);
            next;
        }
        push(@output, $header.' '.$line);
        $bFirst=0;
    }
    push @output, '='x 80;
    $self->nlog(@output);
}

###############################################################################
# Private Methods
###############################################################################

###############################################################################
# Method:    _args_list
# Objective: validate thpl files TESTCASES value
# Details:   See POD documentation at the end of this file.
###############################################################################
# Parses the TESTCASES grammar (names, indexes, ranges, wildcards, repeats,
# exclusions, options) into $self->{tests} and $self->{options}.
# Returns nothing; errors are reported via _set_err() + a NATE Config result.
sub _args_list {
    my ($self,$tests) = @_;
    my (@options, @tests,@methodlist);

    $self->_clear_err();

    # initialize test counters
    $self->{test_nbr}=0;
    $self->{test_cnt}=0;

    @methodlist=@$tests;
    my $tc_excluded;

    # Tie::IxHash preserves @TESTS declaration order, which matters for the
    # exclusion (-name / -index) handling below.
    my %methods;
    no strict;
    tie %methods, Tie::IxHash;
    use strict;
    for ( my $i = 0; $i < scalar(@methodlist);) {
        # Maps test name -> 1-based index.
        $methods{ $methodlist[ $i++ ] } = $i;
    }

    # Separate the tests from the
    # the command line options into
    # their own arrays, @tests and @options
    # respectively
    foreach my $argstr (@{$self->{argv}}){
        foreach my $arg (split(/\,/,$argstr)){
            if ($arg=~$Options_Regex){
                # Command line option
                push @options,$arg;
            } elsif ($arg=~/\.\./){
                # Index List specified, e.g. TESTCASES=3..5 (1-based, inclusive)
                my ($start,$end)=split(/\.\./,$arg);
                my $max_testcases = scalar(@methodlist);
                if ( $start > $max_testcases && $end > $max_testcases ) {
                    my @err = ('Error: TCD::run_test(): ',
                               'Invalid index range TESTCASES='.$start.'..'.$end.' found!!!',
                               'There are total '.$max_testcases.' testcases in your testsuite.',
                               'Both Start Index: '.$start.' and End Index: '.$end.' are out of range',
                               "The maximum index range allowed is 1..$max_testcases");
                    $self->_set_err($Border, @err, $Border);
                    # Log result and add it to the results list.
                    NATE::Result::Config->new('Invalid index range')->log();
                    return;
                } elsif ( $start > $max_testcases ) {
                    # Clamp a too-large start index down to the last valid slot.
                    $Log->warn("Index($start) is out of range in TESTCASES=$start..$end. Maximum index value can be $max_testcases");
                    $start-- while (!defined($methodlist[$start-1]));
                } elsif ( $end > $max_testcases ) {
                    # Clamp a too-large end index down to the last valid slot.
                    $Log->warn("Index($end) is out of range in TESTCASES=$start..$end. Maximum index value can be $max_testcases");
                    $end-- while (!defined($methodlist[$end-1]));
                }
                if ($start<=$end){
                    $start--;
                    $end--;
                    push @tests, @methodlist[$start..$end];
                } else {
                    # Reversed range (e.g. 5..3) runs the slice backwards.
                    # NOTE(review): the inner test is redundant — the else
                    # branch already implies $start > $end.
                    if($start>=$end){
                        $start--;
                        $end--;
                        push @tests, reverse @methodlist[$end..$start];
                    }
                }
            } elsif ($arg=~/^[0-9]{1,3}$/){
                # Index specified
                push @tests,$methodlist[$arg -1] if (defined($methodlist[$arg-1]));
            } elsif($arg=~/^\d+\*\d+$/){
                # Index specified with repeat
                my ($id,$repeat)= $arg=~/(\d+)\*(\d+)$/;
                push @tests, ($methodlist[$id -1]) x $repeat if (defined($methodlist[$id-1])); # push test at index ($id - 1) $repeat times if defined
            } elsif ($arg=~/\%/){
                # Wildcard specified — '%' acts like SQL LIKE, translated to '.*'
                my @tmp=split(/\%/," $arg ");
                my $arg=join('.*',@tmp);
                # Remove leading/trailing spaces
                $arg =~ s/^\s+//;
                $arg =~ s/\s+$//;
                push(@tests, grep{/^$arg$/} @methodlist);
            } elsif($arg=~/^all\*\d+$/i or $arg=~/^\%\*\d+$/){
                # repeat all tests
                my (undef,$repeat)=split(/\*/,$arg);
                push @tests,(@methodlist) x $repeat; #push all tests $repeat times
            } elsif($arg=~/^all$/i or $arg=~/^\%$/){
                push @tests,(@methodlist); # all tests
            } elsif($arg=~/\S+\*\d+/){
                # exact match with repeat
                my ($name,$repeat)= $arg=~/(\S+)\*(\d+)$/;
                push @tests, ($name) x $repeat; # push specific test $repeat times
            } elsif($arg =~ /^-([\d\w\W]+)$/) {
                # Exclude tests prefixed with (-); matched against either the
                # 1-based index or the literal test name.
                my $tc_exclude = $1;
                if($tc_exclude =~ /^\d+$/) {
                    #index-values
                    foreach my $key ( keys %methods ) {
                        if ( $tc_exclude eq "$methods{$key}" ) {
                            delete $methods{$key};
                            $tc_excluded = 1;
                            last;
                        }
                    }
                } else {
                    foreach my $key ( keys %methods ) {
                        #non-index values
                        if ( $tc_exclude eq $key ) {
                            delete $methods{$key};
                            $tc_excluded = 1;
                            last;
                        }
                    }
                }
                $Log->warn('Trying to exclude a testcase that does not exist: '.$arg) if(!$tc_excluded);
            } else {
                # exact match
                push @tests,$arg;
            }
        }
    }

    # When exclusions were used, the run list is everything left in %methods
    # (in original @TESTS order thanks to Tie::IxHash).
    if($tc_excluded) {
        foreach ( keys %methods ) {
            push @tests, $_ if ( defined $methods{$_} );
        }
    }

    # Check if valid test options were supplied
    foreach my $opt (@options){
        unless ($opt =~ /^-q$|^-rsc$|^-ns$|^-nc$|^-nsc$|^-cl$|^-e$|^-st$|^-l$|^-h$|^-ndpre$|^-ndpos$|^-pcf$/){
            $self->_set_err($Border,"Error: Invalid option in TESTCASES list '$opt'",$Border);
            NATE::Result::Config->new("Invalid TESTCASES option '$opt'",abortive=>0)->log();
            return;
        }
    }

    # Store reference to options array
    $self->{options}=\@options;

    # Check if at least one valid test name was supplied and
    # TESTCASES request was not list(-l), setup(-st), or cleanup(-cl) operation.
    if (scalar(@tests) eq 0 and
        not scalar(grep{/^-l$/} @{$self->{options}}) and
        not scalar(grep{/^-cl$|-st$|-h$|-nc$|-ns$|-nsc$|-q$/} @{$self->{options}})){
        $self->_set_err($Border,
                        'Error: No test names were entered on the command line',
                        ' for test execution.',
                        $Border);
        NATE::Result::Config->new('No test names entered',abortive=>0)->log();
        return;
    }

    # Store reference to tests array
    $self->{tests}=\@tests;

    # Check that user hasn't mistakenly placed one of
    # the standard methods in the @TESTS array
    if (scalar(grep{$_=~$Config_Regex} @{$self->{methodlist}})){
        $self->_set_err($Border,
                        "Error: The following methods should not be included ".
                        "in the testcase\n ",
                        "\@TESTS array; init, setup, cleanup, uninit",
                        $Border,);
        return;
    }

    # Store number of TCDs to execute
    $self->{test_cnt} = scalar(@tests);
    return;
}

###############################################################################
# Method:    _is_quiet
# Objective: check to see if logging should be displayed to stdout
# Details:   See POD documentation at the end of this file.
###############################################################################
# True when the -q (quiet) option was passed in TESTCASES.
sub _is_quiet {
    return (scalar(grep{/^-q/} @{shift->{options}}));
}

###############################################################################
# Method:    _list_tests
# Objective: List available test names
# Details:   See POD documentation at the end of this file.
###############################################################################
# Prints a numbered list of test names to the display; a leading '-' on a
# name marks it DISABLED.
sub _list_tests {
    my ($self,$tests)=@_;

    $self->_clear_err();

    # Print test summary to the display only.
    my $border = '#'x 50;
    $self->display( $border, '## Available Test Cases', $border);
    my ($cnt, $state);
    foreach my $tst (@$tests) {
        $cnt++;
        if ($tst=~/^\-/){
            $tst=substr($tst,1);
            $state='(DISABLED)';
        } else {
            $state='';
        }
        $self->display( "$cnt) $tst $state");
    }
    $self->display("\n");
}

###############################################################################
# Method:    _run_method
# Objective: Run method
# Details:   See POD documentation at the end of this file.
###############################################################################
# Executes one test (or config) method by name in the main:: package, traps
# any die(), classifies the outcome into a TCD status, logs the matching NATE
# result, and returns a result-hash ($hRes) with test/type/status/time/id.
sub _run_method {
    my ($self,$test)=@_;

    $self->_clear_err();

    # Place a marker in NATE LOG to show where next
    # test starts
    my $description = '';
    if (defined($self->{testcases}{$test})) {
        $description = $self->{testcases}{$test};
    }
    my $test_type = ($test =~ $Config_Regex) ? 'cfg' : 'tst';
    # Only real testcases (not init/setup/cleanup/uninit) get a DB entry.
    $self->{id} = $self->_testcase_start($test, $description) if($test_type eq 'tst');
    $self->{testname}=$test;

    # Needed for the symbolic call &{*main::.$test} below.
    no strict 'refs'; # XXX: Why?
    my $hRes = {};
    $hRes->{type} = $test_type;
    $hRes->{start_dt} = strftime("%Y-%m-%d",localtime());;
    $hRes->{start_tm} = strftime("%H:%M:%S",localtime());;

    # Start a timer
    $self->timer_start('tmA');

    # test name
    $hRes->{test}=$test;
    $hRes->{id} = $self->{id};

    #
    # Context switch to the "main" context and
    # execute the test in that context
    #
    package main;
    eval {
        $hRes->{status}=&{*main::.$test}($self);
    };
    #
    # Save the possible error string to a variable in the TCD context
    #
    my $eval_msg = $@;
    #
    # Clear the variable returned by eval so we get accurate
    # return value on the next call
    #
    $@=undef;
    #
    # Context Switch back to the TCD context
    #
    package TCD;

    #
    # If the testcase died, check known strings for a script error.
    # If it's not a script error then we can call it a fatal error.
    #
    if ($eval_msg) {
        my $default_error;
        if($test_type eq 'tst') {
            $default_error = uc(NATE::ParamSet::param_global()->get('TCD_DEFAULT_ERROR_TESTCASE'));
        } else {
            $default_error = uc(NATE::ParamSet::param_global()->get('TCD_DEFAULT_ERROR_CONFIG'));
        }
        my $current_status = $self->_perlmsg_lookup($eval_msg, $default_error);
        my $ref_msg = ref($eval_msg);
        if (length($ref_msg)) {
            if(UNIVERSAL::isa($eval_msg,'NATE::Result') || UNIVERSAL::isa($eval_msg,'NATE::BaseException')) {
                # Only call gather_diagnostics if gather_diagnostics are
                # allowed. By default, gather_diagnostics are NOT allowed
                if ($self->gather_diagnostics()) {
                    # if gather_diagnostics is defined by exception and called,
                    # then gather_diagnostics will execute commands that gather
                    # more information to help triage what went wrong
                    if ($eval_msg->can('gather_diagnostics')) {
                        $Log->comment($ref_msg."->gather_diagnostics():");
                        eval { $eval_msg->gather_diagnostics(); };
                        my $err = $@;
                        if ( $err ) {
                            $Log->comment("gather_diagnostics failed with $err");
                        }
                    }
                }
                $self->nlog('TCD::_run_method():' . "testcase exited with reference $ref_msg: $eval_msg");
            } else {
                # Unknown reference type — dump it so the log shows something useful.
                require Data::Dumper;
                $self->nlog('TCD::_run_method():' . "testcase exited with reference $ref_msg:\n");
                $self->nlog('............................Begin Dump.....................................');
                $self->nlog(Data::Dumper::Dumper($eval_msg));
                $self->nlog('............................End of Dump....................................');
            }
        } else {
            $self->nlog('TCD::_run_method(): testcase exited: '.$eval_msg);
        }
        $hRes->{status} = $current_status;
    }

    # A testcase that returned nothing at all gets UNDEF (treated as SCRIPT below).
    $hRes->{status} = $TCD::UNDEF if (!defined($hRes->{status}));
    $self->{log}->flush();

    # Stop timer
    $hRes->{time} = $self->timer_stop('tmA');

    # Map the TCD status to a NATE result object and a console verdict line.
    my $result_obj;
    if ($hRes->{status} == $TCD::CONFIG) {
        $self->log('-- FAIL (CONFIG)');
        $result_obj=NATE::Result::Config->new('Testcase Complete: '.$test.'() Config Error', abortive=>0);
    } elsif ($hRes->{status} == $TCD::SCRIPT){
        $result_obj=NATE::Result::Script->new('Testcase Complete: '.$test.'() Script Error', stacktrace=>1, abortive=>0);
        $self->log('-- FAIL (SCRIPT)');
    } elsif($hRes->{status} == $TCD::PASS){
        $self->log('-- PASS');
        $result_obj=NATE::Result::Pass->new('Testcase Complete: '.$test.'() passed');
    } elsif($hRes->{status} =~ $Fail_Regex){
        $self->log('-- FAIL');
        $result_obj=NATE::Result::Fail->new('Testcase Complete: '.$test.'() failed');
    } elsif($hRes->{status} == $TCD::NA){
        $self->nlog('Test case does not apply for this configuration');
        $self->log('-- NA');
        $result_obj=NATE::Result::NA->new('Testcase Complete: '.$test.'() is not applicable',abortive=>0);
    } elsif ($hRes->{status} == $TCD::WARN){
        $result_obj=NATE::Result::Warn->new('Testcase Complete: '.$test.'() warned');
        $self->log('-- PASS (WARN)');
    } else {
        # Unrecognized return value — treat as a script error.
        $hRes->{status} = $TCD::SCRIPT;
        $self->nlog('Test case did not return a recognized status such as ' .
                    '$TCD::PASS, $TCD::FAIL, $TCD::FATAL, $TCD::CONFIG ' .
                    '$TCD::NA, $TCD::SCRIPT.');
        $result_obj=NATE::Result::Script->new('Testcase Complete: '.$test.'() Script Error',stacktrace=>1,abortive=>0);
        $self->log('-- FAIL (SCRIPT)');
    }

    # do not log 'standard method' results if TCD_STRICT_RESULTS is set
    my $not_strict_results = ! $self->get_param('TCD_STRICT_RESULTS', 0);
    # is the current test a 'standard method'?
    my $not_a_std_method = !scalar(grep $test eq $_, @StdMethods);
    # is this a setup exception? (this check was copied from line run_test();
    # without it, no NATE result will be logged for these errors
    # and then ntest will interpret that as a PASS (not what we want).
    my $setup_exception = ($hRes->{test} =~ $Setup_Regex && $hRes->{status} =~ $All_Err_Regex);
    # evaluate the conditions to determine if we should log a result
    if ($not_strict_results || $not_a_std_method || $setup_exception) {
        Tharn::logresult( $result_obj );
    }
    $self->_testcase_end() if ($test_type eq 'tst');
    use strict 'refs';
    return $hRes;
}

###############################################################################
# Method:    _run_summary
# Objective: Display test summary when run standalone (outside harness)
# Details:   See POD documentation at the end of this file.
###############################################################################
# Builds the banner (testbed/path/script/log location) and prints the full
# results table from _summary() to the display.
sub _run_summary {
    my ($self,$result)=@_;
    my ($buffer, $logsdir,$testdir,$rel_location);

    $rel_location='No Nate Log File (LOGDIR=-)';

    # Print test summary to the display only.
    if (my $logdir = NATE::ParamSet::param_global()->get('LOGDIR')) {
        ($logsdir,$testdir)= $logdir =~ /\/(\S+)\/(\S+)$/; # XXX: maybe File::Basename can be used for this
        $rel_location="/".$logsdir."/".$testdir; # XXX: Change this to catdir
    }
    # Re-resolve stdout in case the ntest handle appeared after construction.
    $self->{stdout}=defined(fileno(\*ntest::NTEST_STDOUT)) ?
        \*ntest::NTEST_STDOUT : new IO::Tee();
    my $border = '#'x80;
    $buffer = sprintf("\n\n%-80s\n%s %s\n%s %s\n%s %s\n%s %s\n%-80s\n",
                      $border,
                      '## TESTBED: ', $self->{testbed},
                      '## TESTPATH:', $self->{tcpath},
                      '## TESTSCRIPT:', $self->{testscript}.'.thpl',
                      '## TESTLOG: ', $rel_location,
                      $border);
    $self->ndisplay($self->_summary($buffer));
    if (defined $self->{msg}) {
        $self->ndisplay($self->{msg}."\n\n");
    } else {
        $self->ndisplay("\n\n");
    }
}

# Renders the per-test results table plus totals; also recomputes
# {total_failure} (drives the rerun loop in run_test) when reruns are enabled.
sub _summary # XXX: consider deriving this from the DB instead of keeping results in memory
{
    my ($self, $buffer)=@_;
    my $status;

    # Initialize counters
    my $cnt = 0;
    my $tot_time = 0;
    my $tot_tests = 0;
    my $tot_pass = 0;
    my $tot_warn = 0;
    my $tot_fail = 0;
    my $tot_disable = 0;
    my $tot_config = 0;
    my $tot_script = 0;
    my $tot_na = 0;
    my $tot_undef = 0;
    my $tot_notrun = 0;

    # The tests array get preloaded with the cfg cases whether they
    # exist or not. If they don't exist, don't count them.
    # NOTE(review): defined(@array) is deprecated and fatal on modern perls
    # (>=5.22) — should be rewritten as a plain truth/existence test.
    $tot_tests = scalar(@{$self->{tests}}) if defined @{$self->{tests}};
    $tot_tests-- unless(defined(&{*main::.'init'}));
    $tot_tests-- unless(defined(&{*main::.'setup'}));
    $tot_tests-- unless(defined(&{*main::.'cleanup'}));
    $tot_tests-- unless(defined(&{*main::.'uninit'}));

    $buffer = '' if(!defined($buffer));
    $buffer .=sprintf("%-4.4s %-4.4s %-8.8s %-12.12s %-52s\n".
                      "%4.4s %4.4s %-8.8s %-12.12s %-52s\n",
                      "Nbr","Type","Result","Time(s)","Test Case",
                      "="x4,"="x4,"="x8,"="x12,"="x48);

    # TCD status code -> display string.
    my %status = ($TCD::SCRIPT  => 'Script',
                  $TCD::CONFIG  => 'Config',
                  $TCD::NA      => 'N/A',
                  $TCD::FATAL   => 'Failed**',
                  $TCD::UNDEF   => 'Undef',
                  $TCD::DISABLE => 'Disable-',
                  $TCD::FAIL    => 'Failed**',
                  $TCD::PASS    => 'Passed',
                  $TCD::WARN    => 'Warning',);
    no strict 'refs';
    foreach my $hsh (@{$self->{result}}){
        $cnt++;
        $status = $status{$hsh->{status}};
        # Don't count the standard methods in the total pass/fail counts
        if ($hsh->{type} && $hsh->{type} eq 'tst') {
            # XXX: Can something better be done here?
            if($hsh->{status}==$TCD::PASS) {
                $tot_pass++;
            } elsif($hsh->{status}=~$Fail_Regex) {
                $tot_fail++;
            } elsif($hsh->{status}==$TCD::CONFIG) {
                $tot_config++;
            } elsif($hsh->{status}==$TCD::SCRIPT) {
                $tot_script++;
            } elsif($hsh->{status}==$TCD::DISABLE) {
                $tot_disable++;
            } elsif($hsh->{status}==$TCD::NA) {
                $tot_na++;
            } elsif($hsh->{status}==$TCD::WARN) {
                $tot_warn++;
            }
        } else {
            $tot_tests--;
        }
        $tot_time+=$hsh->{time};
        $buffer.=sprintf("%4d %-5.5s%-8.8s %12.2f %-52s\n",
                         $cnt,
                         (defined $hsh->{type} and $hsh->{type} =~/tst/i)? 'tst' : 'cfg',# XXX: WTF? How could this not be defined?
                         (defined $status) ? $status : 'undef',
                         $hsh->{time},
                         $hsh->{test});
    }
    $tot_notrun = $tot_tests - $tot_pass - $tot_fail - $tot_config - $tot_script - $tot_disable - $tot_na;
    # Recomputed each pass; run_test() loops while this is non-zero.
    $self->{total_failure} = ($tot_fail + $tot_config + $tot_script) if ($self->{max_rerun_attempts});
    # NOTE(review): the format below appears to have only eight "%s%s" pairs for
    # nine label/value pairs, so the trailing ' Not Run: ' line would be silently
    # dropped by sprintf — verify and add the missing "%s%s\n".
    $buffer .= sprintf("%-80s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n",
                       $Border,
                       ' Time:    ', $self->timer_format(sprintf("%12.2f",$tot_time)),
                       ' Pass:    ', $tot_pass,
                       ' Warn:    ', $tot_warn,
                       ' Fail:    ', $tot_fail,
                       ' Disable: ', $tot_disable,
                       ' Config:  ', $tot_config,
                       ' Script:  ', $tot_script,
                       ' Non App: ', $tot_na,
                       ' Not Run: ', $tot_notrun);
    use strict 'refs';
    return $buffer;
}

###############################################################################
# Method:    _get_nuffo_description
# Objective: description to use in NUFFO
# Details:   See POD documentation at the end of this file.
###############################################################################
# Returns the registered description for the current testcase, or ''.
sub _get_nuffo_description # XXX: Is this still used?
{
    my $self = shift;
    my $description=$self->{testcases}{$self->get_testcase()};
    return (defined $description) ?
        $description : '';
}

# Prints the TESTCASES usage/help text.
# NOTE(review): the heredoc body of $switch appears to have been lost when this
# file was extracted (the text between '<' and '>' is missing) — restore the
# original from version control before relying on this sub.
sub _help # XXX: This should be in POD
{
    my $self = shift;
    my $switch = <log($switch); }

###############################################################################
# Method:    _perlmsg_lookup
# Objective: Determine whether a given argument matches a Perl compilation
#            or runtime error message, and return a corresponding Result type.
# Details:   This is called when a testcase dies and gets caught by an
#            eval and we are trying to figure out if it's a SCRIPT error
#            or a CONFIG error based on the string returned. If the
#            item that was returned is not a string we don't handle
#            that here.
#
#            This method was copied from Subtest.pm.
###############################################################################
sub _perlmsg_lookup {
    my ($self, $msg, $default_error) = @_;
    my $msg_ref = ref($msg);

    # Handle Exceptions
    # Get the type of result from the method result_type and convert it to
    # a TCD result
    # NOTE(review): is_blessed() is called on ref($msg) (a class-name string),
    # not on $msg itself — this branch may never trigger; verify intent.
    if (is_blessed($msg_ref) && (UNIVERSAL::isa($msg_ref,'NATE::BaseException'))) {
        if($default_error) {
            # NOTE(review): the result of this eval is discarded — looks like a
            # leftover; the $default_error mapping happens at the end of the sub.
            eval('$TCD::'.$default_error);
        }
        if (UNIVERSAL::can($msg_ref,'result_type')) {
            my $result_type = $msg_ref->result_type();
            $Log->comment('Got an exception object. Result Type is '.$result_type);
            if ($result_type eq 'FAIL') {
                return $TCD::FAIL;
            } elsif ($result_type eq 'FATAL') {
                return $TCD::FATAL;
            } elsif ($result_type eq 'SCRIPT') {
                return $TCD::SCRIPT;
            } elsif ($result_type eq 'CONFIG') {
                return $TCD::CONFIG;
            } else {
                # could not understand the result
                $Log->debug("TCD cannot understand result '$result_type'");
                return $TCD::FAIL;
            }
        } else {
            # object does not have result_type method
            $Log->debug("Object does not have method 'result_type'");
            return $TCD::FAIL;
        }
    }
    #
    # If we got a NATE Fatal, back return fatal, this will be the last
    # testcase executed. If we got any other non-string back, just
    # return FAIL. If we got a string, check for script and config
    # errors against the known defined messages above. Otherwise
    # just return FAIL.
    #
    my $error_level;
    if($msg_ref) {
        if($msg_ref->isa('NATE::Result::Fatal')) {# XXX: we *could* do this better, but I don't think we should
            $error_level = $default_error || 'FATAL';
        } else {
            $error_level = $default_error || 'FAIL';
        }
    } else {
        $error_level = perlmsg_lookup($msg) || $default_error || 'FAIL';
    }
    # Symbolic lookup of the package variable, e.g. 'FAIL' -> $TCD::FAIL.
    return eval('$TCD::'.$error_level);
}

############################################################################
# Subroutine:  _map_nate_result_to_TCD_status
# Parameters:  NATE severity code of the failure, abortive flag
# Returns:     TCD status code
# Purpose:     Convert from a NATE severity code to a TCD status code
#
# Note: Not all NATE results have a direct TCD mapping. Most of the missing
# severities are reasonably clear what they should be mapped to, but Inspect
# is a little questionable. The NATE documentation says that the test was not
# able to determine a result and the log files must be examined manually to
# determine pass/fail. It would seem safest to count this as a failure.
#
# NATE::Result::Script::severity  = 6 ==> $TCD::SCRIPT = -8
# NATE::Result::Config::severity  = 5 ==> $TCD::CONFIG = -7
# NATE::Result::Fatal::severity   = 4 ==> $TCD::FATAL  = -5
# NATE::Result::Fail::severity    = 4 ==> $TCD::FAIL   =  0
# NATE::Result::Inspect::severity = 3 ==> $TCD::FAIL   =  0 (no exact TCD code match)
# NATE::Result::NA::severity      = 2 ==> $TCD::NA     = -6
# NATE::Result::Warn::severity    = 1 ==> $TCD::PASS   =  1 (no exact TCD code match)
# NATE::Result::Info::severity    = 0 ==> $TCD::PASS   =  1 (no exact TCD code match)
# NATE::Result::Param::severity   = 0 ==> $TCD::PASS   =  1 (no exact TCD code match)
# NATE::Result::Pass::severity    = 0 ==> $TCD::PASS   =  1
#############################################################################
sub _map_nate_result_to_TCD_status {
    # XXX: This can be optimized
    my $self = shift;
    my $severity = shift;
    my $abortive = shift;

    # Currently Fatal and Fail are the same severity, they only differ
    # in abortiveness. In case this ever changes, I am coding this
    # as if they are not the same severity and interpreting an
    # abortive Fail as Fatal, and a non-abortive Fatal as Fail.
    if ( $severity == NATE::Result::Script::severity() ) {
        return $TCD::SCRIPT;
    } elsif ( $severity == NATE::Result::Config::severity() ) {
        return $TCD::CONFIG;
    } elsif ( $severity == NATE::Result::Fatal::severity() ) {
        if ( $abortive ) {
            return $TCD::FATAL;
        } else {
            return $TCD::FAIL;
        }
    } elsif ( $severity == NATE::Result::Fail::severity() ) {
        if ( $abortive ) {
            return $TCD::FATAL;
        } else {
            return $TCD::FAIL;
        }
    } elsif ( $severity == NATE::Result::Warn::severity() ) {
        return $TCD::WARN;
    } elsif ( $severity == NATE::Result::Inspect::severity() ) {
        return $TCD::FAIL;
    } elsif ( $severity == NATE::Result::NA::severity() ) {
        return $TCD::NA;
    } elsif ( ($severity == NATE::Result::Info::severity()) ||
              ($severity == NATE::Result::Param::severity()) ||
              ($severity == NATE::Result::Pass::severity()) ) {
        return $TCD::PASS;
    } else {
        $Log->warn("Do not understand severity $severity");
        return $TCD::FAIL;
    }
}

############################################################################
# Subroutine:  worst_subtest_result
# Parameters:  array of subtest object references
# Returns:     TCD Execution Result
# Purpose:     Find the most severe subtest NATE result and map it to a TCD result
#############################################################################
sub worst_subtest_result {
    my $self = shift;
    my @st = @_;
    my @res;
    my $worst_of_all_subtests = NATE::Result::Pass::severity();
    my $abortive = 0;

    # XXX: Do this though the DB
    foreach my $st (@st) {
        # XXX: make this more efficient
        @res = $st->results();
        if (@res) {
            my $worst = NATE::Result::worst(@res);
            # keep track of the worst of the worst; ties are broken by
            # preferring the more abortive result.
            if ( $worst->severity() > $worst_of_all_subtests ) {
                $worst_of_all_subtests = $worst->severity();
                $abortive = $worst->abortive();
            } elsif ( $worst->severity() == $worst_of_all_subtests ) {
                if ( $worst->abortive() > $abortive ) {
                    $abortive = $worst->abortive();
                }
            }
        } else {
            $Log->debug('No result for subtest '.$st->runid().'- skipping');
        }
    }
    $Log->debug( 'Worst result of all subtests is '.$worst_of_all_subtests.' abortiveness = '.$abortive );
    my $tcd_rc = $self->_map_nate_result_to_TCD_status($worst_of_all_subtests,$abortive);
    $Log->debug('Mapped to TCD equate = '.$tcd_rc);
    return $tcd_rc;
}

######################################################################################
# @name ::_testcase_start
# @summary:
# Writes a INFO Result to identify the internal test case that is being started.
# This Result is used by nuffo.thpl to separate test case log records.
# @arg $testcase_name Required: A unique id or name for the new test case
#   Two forms of this parameter are accepted:
#     'preferred_single_word_testcase_name'
#     'overridden/path/testscript.thpl optional_testcase_name'
#   The testscript name is detected by the presence of a slash in the text.
#   Multiple word test case names are allowed, but will be converted
#   by other programs such as nuffo.thpl that replaces whitespace with underscores.
#   When no testscript name is passed, this function provides the current
#   testscript name in the generated INFO result.
# @arg $testcase_description Required: A short description of the test case
#
# This method was copied from Burt.pm
########################################################################################
sub _testcase_start {
    my $self = shift;

    #In case of rerun we need to return from here.
    return if ($self->{current_rerun_attempt} > 0);
    my ($testcase_name,$testcase_description) = @_;
    my $testcase_start_text = 'Start Testcase: ';
    my $testcase_start_description = 'Description: ';
    $testcase_name = trim($testcase_name);
    my $testcase_full_name;
    # NOTE(review): this ||-joined condition is true for any single-word name
    # and also for most multi-word names (a name failing either test matches);
    # confirm whether && was intended. $testcase_full_name can stay undef if
    # neither branch runs, producing an undef-interpolation warning below.
    if ($testcase_name !~ m/\s/ || $testcase_name !~ m/^\S+\/\S+\s+/) {
        # No test script file name was included
        # as the first word of the testcase name parameter
        my $testscript_name = _testscript_get();
        if ($testscript_name) {
            $testcase_full_name = "$testscript_name $testcase_name";
        } else {
            $testcase_full_name = $testcase_name;
        }
    }
    my $start_message = $testcase_start_text."$testcase_full_name";
    if (defined($testcase_description) and $testcase_description !~ m/^\s*$/) {
        $start_message .= " ".$testcase_start_description."$testcase_description";
    }
    # Add an INFO result that can be seen without parsing the log files
    Tharn::logresult('INFO',$start_message); # XXX: This is a waste of a DB time

    # testcase_begin registers a new test case with the job-local testcase database.
    # Until teststep_end is called, any new results logged by this test process and
    # any results logged by any subtests created will be recorded as being contained
    # within this new test case (step).
    my $qc_name = $testcase_name;
    if (defined($self->{testcase_to_qc_name}->{$testcase_name})) {
        $qc_name = $self->{testcase_to_qc_name}->{$testcase_name};
    }
    $TestCase_Handle = Tharn::testcase_begin(name => $qc_name);
    return $TestCase_Handle->{id};
}

###########################################################
# @name ::_testscript_get
# @summary Returns the name of the active test script.
#
# This method was copied from Burt.pm
###########################################################
# Walks the caller() stack upward, skipping NATE-framework frames, and returns
# the filename of the nearest user test script ('' if none is found).
sub _testscript_get {
    # XXX: There has to be a better way than all this regex
    my $msg_ref = shift;
    my $depth = 1;
    my $testscript = '';
    my $previous_script = '';
    while (my ($package,$filename,$line) = caller($depth)) {
        #print("trace: call depth: $depth $package,$filename,$line\n");
        # Note: Tharn_Subtest.pm is to be replaced with Subtest.pm
        # Other Tharn prefixed packages to get their own names
        if ($filename =~ /\/Subtest.pm/) {
            # First framework boundary above a user frame: that user frame wins.
            if ($previous_script) {
                $testscript = $previous_script;
                last;
            }
        } elsif ($filename =~ /Tharn.pm/ and $package eq 'Subtest') {
            # Framework frame — skip.
        } elsif ($package eq 'TestObj') {
            # The top test invocation (usually ntest)
            last;
        } elsif ($filename =~ /Tharn|Connectrec.pm|Hostrec.pm|Param.pm|Rslt.pm/) {
            # Other NATE layers
        } else {
            # Candidate user script frame; remember it.
            $previous_script = $filename;
        }
        $depth++
    }
    return $testscript;
}

# From old Timer base class

# Records a start timestamp (Time::HiRes float seconds) under the named timer,
# or under the default 'timer_start' slot when no name is given.
sub timer_start {
    my $self = shift;
    my $name = shift;
    if (defined $name and length $name){
        $self->{"timer_$name"}{start}=gettimeofday;
    } else {
        $self->{"timer_start"}{start}=gettimeofday;
    }
}

# Returns elapsed seconds since the matching timer_start(); 0 and an error via
# _set_err() if the named timer was never started.
sub timer_stop {
    my $self = shift;
    my $name = shift;
    my $time;
    $self->_clear_err;
    if (defined $name and length $name){
        unless (defined $self->{"timer_$name"}{start}){
            $self->_set_err("Error stop_timer(): timer '$name' was never started\n");
            return 0;
        }
        $time = gettimeofday - $self->{"timer_$name"}{start};
    } else {
        $time = gettimeofday - $self->{"timer_start"}{start};
    }
    return $time;
}

# Formats a seconds value as "HH:MM:SS.mmm", or as a compact "1h2m3s" form
# when $minimize is true (zero components are omitted).
sub timer_format {
    my ($self,$epoch,$minimize) = @_;
    my ($milli,$seconds,$minutes,$hours,$time_str);

    # Split off the fractional part; remainder math below is integer-only.
    ($epoch,$milli)=split(/\./,$epoch);
    $hours = int ($epoch / 3600);
    $epoch = ($epoch) % 3600;
    $minutes = int ($epoch / 60);
    $epoch = ($epoch) % 60;
    $seconds = $epoch;
    # Zero-pad each component to two digits.
    $hours = "0".$hours if ($hours < 10);
    $minutes = "0".$minutes if ($minutes < 10);
    $seconds = "0".$seconds if ($seconds < 10);
    if ($minimize){
        if ($hours ne '00') {
            $hours =~ s/^0+//; # strip leading zeros
            $time_str .= "$hours" . "h";
        }
        if ($minutes ne "00") {
            $minutes =~ s/^0+//; # strip leading zeros
            $time_str .= "$minutes" . "m";
        }
        if ($seconds ne "00") {
            $seconds =~ s/^0+//; # strip leading zeros
            $time_str .= "$seconds" . "s";
        }
    } else {
        $time_str .= "$hours:$minutes:$seconds.$milli";
    }
    return "$time_str";
}

###############################################################################
# Method:    _determine_postsetup
# Objective: See if user specified NATE_POST_SETUP parameter and store the
#            value.
# Details:
###############################################################################
sub _determine_postsetup($) {
    my $self = shift;

    # Determine whether a post setup has been specified - a Config DB
    # requirement.
    my $nate_postsetup=$self->get_param('NATE_POST_SETUP');
    # Do rudimentary checking to determine file exists as specified and is
    # non-empty. Enforce that it be a thpl?
    if (-f $nate_postsetup && -s $nate_postsetup) {
        $self->{nate_postsetup} = $nate_postsetup;
        $Log->trace('Nate post setup specified ' . $self->{nate_postsetup} .
                    ' will be run after setup.') if $Log->may_trace();
    } else {
        $Log->trace('Nate post setup specified ' . $nate_postsetup .
                    ' either does not exist or is an empty file.') if $Log->may_trace();
    }
}

# Returns the list of setup-phase function names: always 'setup', plus the
# post-setup placeholder label when a NATE_POST_SETUP script is configured.
# Returns a list in list context, a comma-joined string in scalar context.
sub _setup_fnames($) {
    my $self = shift;
    my @return_arr = qw(setup);
    if (!$self->{bSetup} && $self->{nate_postsetup}) {
        # Return the post process label. We will retrieve the real post process
        # when we need it.
        push (@return_arr, $nate_postsetup_label);
    }
    $Log->trace('setup_fnames returns ' . join(',', @return_arr)) if $Log->may_trace();
    return wantarray ? @return_arr : join(',', @return_arr);
}

# Returns a log link (file + line) pointing at the user script line that
# invoked the public method one frame above this helper.
sub _get_link_to_script {
    require NATE::Log;
    # Go back one call frame because the actual method invoked (step/description)
    # is one call frame back.
    my @stack_frame = caller 1;
    return NATE::Log::_get_link_to_script($stack_frame[1], $stack_frame[2]);
}

1;

__END__

#############################################################################
# POD Documentation
#############################################################################

=head1 Package Name

TCD.pm

=head1 Description

TCD (Multi Test Driver) extends the ntest command line interface by providing
the ability to execute specific testcase methods in a single testscript.

It is possible to populate QC with the results of multiple testcases run via
TCD because TCD calls Tharn::begin_testcase for the user. This allows
qc_push.thpl to fill in the testcase name when %t is specified in QC_TEST_NAME.
If the QC testcase names do not exactly match the TCD testcase names it is
possible to specify a mapping between the two to the TCD constructor. When
uploading multiple testcases each testcase's log archive will point to the
complete TCD logfile.

Also included are additional methods that utilize the IO::Tee module that
enables output to be multiplexed to nate logs and STDOUT in a single call.

When an exception is propagated up to TCD (because the testcases didn't catch
the exception), TCD will collect diagnostics by invoking
$exception->gather_diagnostics() if defined by exception class and if
TCD_GATHER_DIAGNOSTICS is allowed

TCD_GATHER_DIAGNOSTICS is interfaced through TCD::gather_diagnostics()

Any exception class can choose to define a gather_diagnostics() method to help
triage an error. When gather_diagnostics() is called, it will print out
additional information to the logs to help the user triage the error.

=head1 Synopsis

Here are some examples for executing a test script using various TESTCASES
value combinations.

=over 4

=item *

ntest -noconsole Syntax/storage_show.thpl TESTCASES=[]

=item *

TESTCASES=mytest1

Enter a single test name.

=item *

TESTCASES=mytest1,mytest2

Enter comma separated list of test names.
Space separated values are allowed also if the entire TESTCASES string value is quoted.
TESTCASES=mytest,mytest,mytest # execute test case 'mytest' 3 times TESTCASES="mytest*100" # execute test case 'mytest' 100 times TESTCASES="2*50" # execute test case at index position 2 50 times. TESTCASES=-3,-testcase_1 # Disables execution of testcase number 3 and 'testcase_1' testcase. =back =head1 PARAMETERS =head2 TESTCASES Used to provide list of test names (methods) to execute in the test script. If not specified, all tests in the testscript are executed once. =head2 TCD_DEFAULT_ERROR_TESTSCASE Override the default error that is thrown when testcase functions were written poorly and do not catch their exceptions Example: TCD_DEFAULT_ERROR_TESTSCASE=SCRIPT =head2 TCD_DEFAULT_ERROR_CONFIG Override the default error that is thrown when configuration functions (init/setup/cleanup/uninit) were written poorly and do not catch their exceptions Example: TCD_DEFAULT_ERROR_CONFIG=PASS =head2 TCD_STRICT_RESULTS If true, TCD will NOT log NATE results for the init/setup/cleanup/uninit methods - if they PASS. Any FAIL result which terminates the run will still generate a NATE result, regardless of which method failed. =head2 TCD_ABORT_SEVERITY If defined, TCD will act as TCD::FATAL defines if a non-passing result is seen. Non-passing result are TCD::FAIL, TCD::NA, TCD::CONFIG, TCD::SCRIPT. TCD::PASS and TCD::WARN are considered passing results. The argument can be multiple result strings or just one result string, case insensitive =head2 TCD_GATHER_DIAGNOSTICS If enabled, then for an exception object $exception_obj, TCD will call $exception_obj->gather_diagnostics() if the method is defined. The default value is 0. The available values are 1 and 0. gather_diagnostics() should execute commands on filer/node/cluster or client which will send diagnostic info to the logs. 
=over =item TCD_ABORT_SEVERITY=SCRIPT will FATAL if result is TCD::SCRIPT =item TCD_ABORT_SEVERITY=script will FATAL if result is TCD::SCRIPT =item TCD_ABORT_SEVERITY=SCRIPT,NA will FATAL if result is TCD::SCRIPT or TCD::NA =back =head1 METHODS =head2 new Creates a new Mult Test Driver (TCD) object. =over =item C<< -testcases => \@testcases >> (Required) test method name/descriptions (key/value pair) that can be executed =item C<< -options => $options >> (Optional) Comma separated list of TCD options. Examples: -q -rsc -ns -nc -nsc -cl -e -st -l -h -pcf =item C<< -testcase_to_qc_name => \%testcase_name_conversion >> (Optional) Hash reference of TCD testcase names as the keys and the qc testcase names as the values. This is useful if the tcd names do not match the qc names. Providing a mapping between the names will allow TCD to call Tharn::begin_testcase with the proper QC testcase name. If mappings are not provided for all cases TCD will use the subroutine name for cases without mappings. If mappings are needed but should be provided at run time consider using qc_push.thpl's QC_TC_MAP_FILE or QC_TC_MAP_DATA. =item Outputs Returns a TCD object. =item Example 1 # Test case method names to control my @TESTS=qw( test1 test2 test3); # Create a Test Case Object $Test = new TCD(-testcases=>\@TESTS); if ($Test->error){ $Test->nlog($Test->errmsg); return $TCD::FAIL; } =item Example 2 $Test = TCD->new( -testcases => ["one","two","three"], -testcase_to_qc_name => { "one" => "WAFL123-001-002-003 one", "two" => "WAFL123-001-002-003 two", } ); =back =head2 description This method creates a standard format header for the testcase method being executed. The header contains the name of the testcase script, test method, along with the user defined description. Alternatively, calling this method with no arguments will use the current testcase's description from the @Testcases array in the header. 
This method should only be called once at the beginning of each public method and standard methods if used (init(), setup(), cleanup(), and uninit()). =over =item Inputs =item * $descr|@arr|undef Detailed description of the test =item Outputs None =item Examples sub mytest1() { # print testcase header $Test->description( "This test verifies that a single", "volume group containing 1 volume", "can be snapped" ); ... ... ... return $TCD::PASS; } Alternatively: sub mytest1() { # print testcase header $Test->description(); # Uses description from @Testcases for this testcase. ... ... ... return $TCD::PASS; } Note: Support has been added for diagnostic pre and post routines. These routines (if defined in your script) will be run before and after your testcase is run, but will not contribute to the pass/fail status of that testcase. These routines provide a convenient place to run filer commands that may aide in triaging failures. If you do not define routines that start with diag_pre_/diag_post_, this feature will not be activated. If you define a "diag_pre_testcase()" routine, that routine will be executed prior to running each testcase in your script. Likewise, if you define a "diag_post_testcase()" routine, it will be executed after each testcase is run. The diag_pre_testcase() routine will be passed the name of the testcase that will be called after it. The diag_post_testcase() routine will be passed that name as well as the status of the testcase run before it. A diagnostic pre-routine might be needed if you need to clear out filer tables prior to running a testcase. In the diagnostic post-routine, you may want to only run diagnostic commands in the event of a testcase failure, but the routine will be called regardless, so you are not limited in that regard. 
Ex: sub diag_post_testcase { my $tc_name = shift; my $tc_result = shift; if ( $tc_result != $TCD::PASS ) { foreach my $server ($Server->servers()) { $Server->set_execution_server(-Server =>$server); eval {$Server->network_interface_show();}; eval {$Server->network_port_show();}; } } } =back =head2 display This method displays information to STDOUT. This information is not captured in any log files. A carriage return is automatically appended to the input buffer. =over 4 =item Inputs =item * @msg An array or scalar of information to be logged to a file for the test being run. =item Outputs None =item Examples # Determine if error occurred... $Test->display("This text will only be written to STDOUT"); =back =head2 get_dir Directory location of testcase being executed =over =item Inputs none =item Outputs Returns a scalar containing the directory name of the testcase. If an error occurs the error flag is set, errmsg is set to the failure reason, and "" is returned. =item Examples $Test->get_dir("Testcase is located in directory'".$Test->get_dir()."'"); =back =head2 get_loghdl This method retrieves the log handle that is used by the the TCD object to multiplex log results. This handle can be passed to other modules that are used by a test for logging additional information. =over =item Inputs none =item output Returns the file handle from TCD object. =item Examples $loghdl=$Test->get_loghdl(); =back =head2 get_name Name of testcase =over =item Inputs none =item Outputs Returns the name of the test case. If an error occurs the error flag is set, errmsg is set to the failure reason, and "" is returned. =item Examples $Test->nlog("Test case name is '".$Test->get_name()."'"); =back =head2 gather_diagnostics If called without arguments, then gather_diagnostics() will return 1 or 0 indicating whether an exception's gather_diagnostics() will be invoked when an exception is received by TCD. If called with arguments, then TCD will set the arguments that are specified. 
See Examples for how gather_diagnostics() can be called. =over =item Inputs =over =item is_allowed BOOLEAN, either 1 or 0. If allowed, then 1. If not allowed, then 0. =back =item Output Note: Output is most clearly illustrated in Examples. See note directly under gather_diagnostics, which describes what gets returned depending on the arguments. =item Examples my $gather_diagnostics = $Test->gather_diagnostics(); # returns 1 or 0 $Test->gather_diagnostics( is_allowed => 1 ); # sets preference to 1 =back =head2 get_param This method retrieves any environment variables that have been set. =over =item Inputs =item * $param The name of the parameter to retrieve =item * I<$default> value to use if parameter is not set. =item output Returns a value if successful. If an error occurred, the error flag is set, errmsg is set to the failure reason. =item Examples $volume=$Test->get_param("VOLUME","vol1"); if ($Test->error) { $Test->log($Test->errmsg); return $TCD::FAIL; } =back =head2 get_testcase Name of test case that is being executed =over =item Inputs none =item Outputs Returns the name of the test case. If an error occurs the error flag is set, errmsg is set to the failure reason, and "" is returned. =item Examples $Test->nlog("Test case name is '".$Test->get_testcase()."'"); =back =head2 get_next_testcase Name of the next test case that will be executed =over =item Inputs none =item Outputs Returns the name of the next test case. If an error occurs the error flag is set, errmsg is set to the failure reason, and "" is returned. =item Examples $Test->nlog("Next test case name is '".$Test->get_next_testcase()."'"); =back =head2 get_previous_testcase Name of the previous test case that was executed =over =item Inputs none =item Outputs Returns the name of the previous test case. If an error occurs the error flag is set, errmsg is set to the failure reason, and "" is returned. 
=item Examples $Test->nlog("Previous Test case name is '".$Test->get_previous_testcase()."'"); =back =head2 get_previous_result Result of the previous test case that was executed =over =item Inputs none =item Outputs Returns the result of the previous test case. If an error occurs the error flag is set, errmsg is set to the failure reason, and "" is returned. =item Examples $result = $Test->get_previous_result(); =back =head2 get_worst_result Return the worst result value of all testcases that ran =over =item Inputs none =item Outputs A TCD Result value ($TCD::SCRIPT, $TCD::CONFIG, etc...) =item Examples $worst_result = $Test->get_worst_result(); if ($worst_result <= $TCD::FAIL) { # at least one test fataled } elsif ($worst_result == $TCD::PASS) { # all tests passed } =back =head2 worst_subtest_result This method return the worst result value of all the subtests that are passed in, and converts that NATE result into a TCD result. =over =item Inputs =item * @subtests An array of Subtest objects. =item Outputs The subtests passed in will be traversed and the worst NATE result of all subtests will be mapped to a TCD Result value ($TCD::SCRIPT, $TCD::CONFIG, etc...) 
based on the following mapping: NATE::Result::Script::severity = 6 ==> $TCD::SCRIPT NATE::Result::Config::severity = 5 ==> $TCD::CONFIG NATE::Result::Fatal::severity = 4 ==> $TCD::FATAL NATE::Result::Fail::severity = 4 ==> $TCD::FAIL NATE::Result::Inspect::severity = 3 ==> $TCD::FAIL NATE::Result::NA::severity = 2 ==> $TCD::NA NATE::Result::Warn::severity = 1 ==> $TCD::WARN NATE::Result::Info::severity = 0 ==> $TCD::PASS NATE::Result::Param::severity = 0 ==> $TCD::PASS NATE::Result::Pass::severity = 0 ==> $TCD::PASS =item Examples $worst_result = $Test->worst_subtest_result(@subtests); if ($worst_result <= $TCD::FAIL) { # at least one subtest failed } elsif ($worst_result == $TCD::PASS) { # all subtests passed } =back =head2 log This method logs information to both STDOUT and a test logfile opened by the TCD object. =over =item Inputs =item * @msg An array or scalar of information to be logged to a file for the test being run. =item Outputs None =item Examples # Determine if error occurred... $TCD->log( "This text will be written to a log file as well", "as STDOUT" ); =back =head2 ndisplay This method will log information to both STDOUT and the nate log. =over =item Inputs =item * @output An array or scalar of information to be logged to a file for the test being run. =item Outputs None =item Examples # Determine if error occurred... $TCD->ndisplay("This text will only be written to STDOUT and NATE log file"); =back =head2 nlog This method will log information to both nate log, testcase log, and STDOUT. =over =item Inputs =item * @buf An array or scalar of information to be logged for the test being run. =item Outputs None =item Examples # Determine if error occurred... 
$TCD->nlog( "This text will be written to a NATE, testcase log file, as well", "as STDOUT" ); =back =head2 read_config This method will read a config file of parameter value pairs and store them into the environment =over =item Inputs =item * $config A scalar value to a config file (*.cfg) containing param value pairs to be stored in the environment for the test to use. If only a file name is provied the default location of the config file is relative to the location of the test thpl being executed. This function is automaticaly called when a CONFIG is passed in on the command line. =item Outputs None =item Examples ... if the test was being run from /x/eng/testbeds/mylab/mytestbed # location of config file would be /u/knopp/p4/test/TCD/mytest.cfg ntest CONFIG=mytest.cfg TCD/template.thpl # location of config file would be # /x/eng/testbeds/mylab/mytestbed/mytest.cfg ntest CONFIG=./mytest.cfg TCD/template.thpl # location of config file would be /u/knopp/mytest.cfg ntest CONFIG=~/mytest.cfg TCD/template.thpl The following param value formats in the config file are acceptable. All strings after a comment character '#' are ignored. myparam=myvalue myparam=my multiword value myparam='my multiword value' myparam="my multiword value" myparam=my multiword value ### my parameter comment =back =head2 run_test This method executes the test names in the @TESTS array as identified on the command line. The following standard methods will automatically be called if defined in the testscript; init() and setup() before running the tests followed by cleanup() and uninit(). Standard methods init() and uninit() should be used for object creation and deletion. While setup() and cleanup() should be used for configuration and teardown. Each testmethod callback should return a 1 ($TCD::PASS) or 0 ($TCD::FAIL) to indicate the result of the test. 
If either of the configuration methods init() or setup() fail then the whole testcase is considered a failure and none of the test cases are executed. If any test method returns a fatal error ($TCD::FATAL) test execution is aborted and no cleanup() is performed. =over =item Inputs none =item Outputs None =item Examples # Tests to run # Use '-' character to disable test from running my @TESTS=qw(mytest1 mytest2 -mytest3); # TCD $Test = new TCD(-testcases=>[@TESTS]); if ($Test->error){ $Test->nlog($Test->errmsg); return $TCD::FAIL; } $Test->run_test(); if ($Test->error){ $Test->error($Test->errmsg); return $TCD::FAIL; } sub mytest1{ $Test->description("test1 descr"); $Test->nlog("test1 passes"); return $TCD::PASS; } sub mytest2{ $Test->description("test2 descr"); $Test->nlog("test2 fails"); return $TCD::FAIL; } sub mytest3{ $Test->description("test3 descr"); $Test->nlog("test3 disabled"); return $TCD::DISABLE; } =back =head2 step This method creates a standard format header for each testcase step in a testcase method. Sequential number of steps is maintained by TCD. =over =item Inputs =item * $descr|@arr Description of the test step =item Outputs None =item Examples $Test->step("Mount the NFS created filesystems"); =back =head2 timer_format This method returns a number as a formatted time value 'HH:MM:SS'. If the optional $minimize argument is sent and positive, we ruturn the time formatted like this: HHh:MMm:SSs without leading zeros =over =item Inputs =item * $seconds Number of seconds to format. =item * I<$minimize> Optional - use a format that eliminates leading zeros (3h22m11.1s) =item outputs Returns a format time value. If an error occurred, the error flag is set, errmsg is set to the failure reason, and 0 is passed back as the return value. =item Examples # run time $duration = $TCase->timer_stop("myTimer"); print "Time running:".$TCase->$timer_format($duration)." seconds\n"; =back =head2 timer_start This method sets time zero for a named timer. 
The timer used is a high resolution timer with millisecond accuracy. =over =item Inputs =item * $name Unique name to identify timer =item outputs If an error occurred, the error flag is set, errmsg is set to the failure reason, and 0 is passed back as the return value. =item Examples # init_time $myObj->timer_start("myTimer"); if ($TCase->error){ print $TCase->errmsg; return 0; } =back =head2 timer_stop This method returns the time duration since the timer was started. The timer used is a high resolution timer with millisecond accuracy. =over 4 =item Inputs =item * $name Unique name to identify timer =item outputs Returns the time in seconds that has expired since timer_start($name). If an error occurred, the error flag is set, errmsg is set to the failure reason, and 0 is passed back as the return value. =item Examples # run time my $duration = $myObj->timer_stop("myTimer"); print "Time running: $duration seconds\n"; =back =head1 Rerunning failed tests TCD supports rerunning the whole script, if any testcase failed in the current ntest run. The workflow is that, you run your TCD based scripts asusual and specify max no.of rerun attempts. After executing all the testcases if any testcase failed it will run all the testcases again. All the results except TCD::PASS, TCD::WARN, TCD::NA will be treated as failed in case of rerun. To selectively rerun the failed testcases alone, you can use use Driver's rerun feature. To use that you should convert TCD script to NATE::Driver script. Refer http://brewery.netapp.com/Brewery/QA/Tools/NATE/UserGuides/ECMP-007738.html Selective testcase rerun is not supported by TCD because it runs the testcases in a single process and historically, the testcases are developed to have dependency on each other for coding convenience, though that's considered as a bad coding style. =head2 Applicable usecases =over =item ntest which runs a NATE::Driver script which in turn runs TCD scripts. 
=item ntest which runs a TCD script =item ntest which runs stest which in turn runs TCD scripts through its blocks. =back =head2 Sample usage bash$ ntest TCD_based_script.thpl FILER=filer1 SOME_OTHER_PARAM=foo MAX_RERUN_ATTEMPTS=2 20151027 124845 [] {STAF_handle=>'814',name=>'bin/ntest(3018)',PID=>'3018', machine=>'scspn0025218001.lab.eng.btc.netapp.in'} ntest: Log directory is /x/eng/natelogs/user/20151027_124845 Note that the only change in the command line is 'MAX_RERUN_ATTEMPTS' specified additionally. MAX_RERUN_ATTEMPTS=n (n >= 0) will be max no.of rerun attempts. =head1 See Also Package inherits from the following modules. See these modules for further interface detail. L> =head1 AUTHOR/MAINTAINER =over =item NATE Development (dl-nate-dev@netapp.com) =back $Id: //depot/prod/DOT/dev/test/nate/lib/TCD.pm#1 $ Last revised $Date: 2016/04/12 $ by $Author: madhavs $ =cut