From 958d8435c257f93123dec83647130457816a23e6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 15:12:25 -0400 Subject: ktest: Remove commented exit A debug 'exit' was left in ktest.pl. Remove it. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 1 - 1 file changed, 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 292b13ad03f5..a40af07e7772 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -2448,7 +2448,6 @@ sub create_config { } close(OUT); -# exit; make_oldconfig; } -- cgit v1.2.3 From 921ed4c7208e5c466a87db0a11c6fdd26bcc2fe7 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 15:18:27 -0400 Subject: ktest: Add PRE/POST_KTEST and TEST options In order to let the user add commands before and after ktest runs, the PRE_KTEST and POST_KTEST options are defined. They hold shell commands that will execute before ktest runs its first test, as well as after it completes its last test. The PRE_TEST and POST_TEST will be run before and after (respectively) a given test. They can either be global (done for all tests) or defined by a single test. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 37 +++++++++++++++++++++++++++++++++++++ tools/testing/ktest/sample.conf | 30 ++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index a40af07e7772..31b941613f98 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -77,6 +77,11 @@ my $output_config; my $test_type; my $build_type; my $build_options; +my $final_post_ktest; +my $pre_ktest; +my $post_ktest; +my $pre_test; +my $post_test; my $pre_build; my $post_build; my $pre_build_die; @@ -197,6 +202,10 @@ my %option_map = ( "OUTPUT_DIR" => \$outputdir, "BUILD_DIR" => \$builddir, "TEST_TYPE" => \$test_type, + "PRE_KTEST" => \$pre_ktest, + "POST_KTEST" => \$post_ktest, + "PRE_TEST" => \$pre_test, + "POST_TEST" => \$post_test, "BUILD_TYPE" => \$build_type, "BUILD_OPTIONS" => \$build_options, "PRE_BUILD" => \$pre_build, @@ -1273,6 +1282,10 @@ sub save_logs { sub fail { + if (defined($post_test)) { + run_command $post_test; + } + if ($die_on_failure) { dodie @_; } @@ -1937,6 +1950,10 @@ sub halt { sub success { my ($i) = @_; + if (defined($post_test)) { + run_command $post_test; + } + $successes++; my $name = ""; @@ -3518,6 +3535,18 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $start_minconfig_defined = 1; + # The first test may override the PRE_KTEST option + if (defined($pre_ktest) && $i == 1) { + doprint "\n"; + run_command $pre_ktest; + } + + # Any test can override the POST_KTEST option # The last test takes precedence.
+ if (defined($post_ktest)) { + $final_post_ktest = $post_ktest; + } + if (!defined($start_minconfig)) { $start_minconfig_defined = 0; $start_minconfig = $minconfig; @@ -3572,6 +3601,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { doprint "\n\n"; doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type$installme\n\n"; + if (defined($pre_test)) { + run_command $pre_test; + } + unlink $dmesg; unlink $buildlog; unlink $testlog; @@ -3637,6 +3670,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { success $i; } +if (defined($final_post_ktest)) { + run_command $final_post_ktest; +} + if ($opt{"POWEROFF_ON_SUCCESS"}) { halt; } elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot && $reboot_success) { diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index cf362b3d1ec9..4472452f5be1 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -376,6 +376,24 @@ # DEFAULTS # DEFAULTS SKIP +# If you want to execute some command before the first test runs +# you can set this option. Note, it can be set as a default option +# or an option in the first test case. All other test cases will +# ignore it. If both the default and first test have this option +# set, then the first test will take precedence. +# +# default (undefined) +#PRE_KTEST = ${SSH} ~/set_up_test + +# If you want to execute some command after all the tests have +# completed, you can set this option. Note, it can be set as a +# default or any test case can override it. If multiple test cases +# set this option, then the last test case that set it will take +# precedence +# +# default (undefined) +#POST_KTEST = ${SSH} ~/dismantle_test + # The default test type (default test) # The test types may be: # build - only build the kernel, do nothing else @@ -426,6 +444,18 @@ # (default 0) #NO_INSTALL = 1 +# If there is a command that you want to run before the individual test +# case executes, then you can set this option +# +# default (undefined) +#PRE_TEST = ${SSH} reboot_to_special_kernel + +# If there is a command you want to run after the individual test case +# completes, then you can set this option. +# +# default (undefined) +#POST_TEST = cd ${BUILD_DIR}; git reset --hard + # If there is a script that you require to run before the build is done # you can specify it with PRE_BUILD. # -- cgit v1.2.3 From e5c2ec11a07b9e1e7eb714aad13583e2bbae49bd Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 15:22:05 -0400 Subject: ktest: Add PRE_INSTALL option Add the PRE_INSTALL option that will allow a user to specify a shell command to be executed before the install operation executes. 
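For example, a config could use it to clear out stale modules for the version about to be installed (the target host here is only illustrative):

  # illustrative example only; adjust the ssh target to your setup
  PRE_INSTALL = ssh user@target rm -rf /lib/modules/$KERNEL_VERSION

As with POST_INSTALL, the environment variable KERNEL_VERSION is set to the version of the kernel being installed when the command runs.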
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 8 ++++++++ tools/testing/ktest/sample.conf | 8 ++++++++ 2 files changed, 16 insertions(+) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 31b941613f98..e91702eee580 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -106,6 +106,7 @@ my $grub_menu; my $grub_number; my $target; my $make; +my $pre_install; my $post_install; my $no_install; my $noclean; @@ -225,6 +226,7 @@ my %option_map = ( "ADD_CONFIG" => \$addconfig, "REBOOT_TYPE" => \$reboot_type, "GRUB_MENU" => \$grub_menu, + "PRE_INSTALL" => \$pre_install, "POST_INSTALL" => \$post_install, "NO_INSTALL" => \$no_install, "REBOOT_SCRIPT" => \$reboot_script, @@ -1669,6 +1671,12 @@ sub install { return if ($no_install); + if (defined($pre_install)) { + my $cp_pre_install = eval_kernel_version $pre_install; + run_command "$cp_pre_install" or + dodie "Failed to run pre install"; + } + my $cp_target = eval_kernel_version $target_image; run_scp_install "$outputdir/$build_target", "$cp_target" or diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index 4472452f5be1..19754e0c8c64 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -426,6 +426,14 @@ # (default "") #BUILD_OPTIONS = -j20 +# If you need to do some special handling before installing +# you can add a script with this option. +# The environment variable KERNEL_VERSION will be set to the +# kernel version that is used. +# +# default (undefined) +#PRE_INSTALL = ssh user@target rm -rf '/lib/modules/*-test*' + # If you need an initrd, you can add a script or code here to install # it. The environment variable KERNEL_VERSION will be set to the # kernel version that is used. Remember to add the initrd line -- cgit v1.2.3 From b0918612545e698e55889c15d25e5118ea09c1fd Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 15:26:00 -0400 Subject: ktest: Add CONFIG_BISECT_CHECK option The config-bisect can take a bad config and bisect it down to find out what config actually breaks the config. But as all tests will apply a minconfig (defined by a user) to apply before booting, it is possible that the minconfig could actually make the bad config work (minconfigs can disable configs). The end result is that the config bisect test will not find a config that breaks. This can be rather frustrating to the user. The CONFIG_BISECT_CHECK option, when set to 1, will make sure that the bad config (with the minconfig applied) still fails before trying to bisect. And yes, I did get burned by this. 
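A minimal sketch of a test section using the check (the path to the bad config is only illustrative):

  TEST_START
  TEST_TYPE = config_bisect
  CONFIG_BISECT_TYPE = boot
  # illustrative path; point this at the known-bad .config
  CONFIG_BISECT = /home/test/config-bad
  CONFIG_BISECT_CHECK = 1

With the check enabled, ktest first runs the bisect test type on the bad config combined with the min config, and stops with an error if that kernel does not fail.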
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 14 ++++++++++++++ tools/testing/ktest/sample.conf | 6 ++++++ 2 files changed, 20 insertions(+) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index e91702eee580..8ce58d715aef 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -173,6 +173,7 @@ my $bisect_check; my $config_bisect; my $config_bisect_type; +my $config_bisect_check; my $patchcheck_type; my $patchcheck_start; @@ -283,6 +284,7 @@ my %option_map = ( "CONFIG_BISECT" => \$config_bisect, "CONFIG_BISECT_TYPE" => \$config_bisect_type, + "CONFIG_BISECT_CHECK" => \$config_bisect_check, "PATCHCHECK_TYPE" => \$patchcheck_type, "PATCHCHECK_START" => \$patchcheck_start, @@ -2743,6 +2745,18 @@ sub config_bisect { } } my $ret; + + if (defined($config_bisect_check) && $config_bisect_check) { + doprint " Checking to make sure bad config with min config fails\n"; + create_config keys %config_list; + $ret = run_config_bisect_test $config_bisect_type; + if ($ret) { + doprint " FAILED! Bad config with min config boots fine\n"; + return -1; + } + doprint " Bad config with min config fails as expected\n"; + } + do { $ret = run_config_bisect; } while (!$ret); diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index 19754e0c8c64..e4a12da75ba6 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -1077,6 +1077,12 @@ # can specify it with CONFIG_BISECT_GOOD. Otherwise # the MIN_CONFIG is the base. # +# CONFIG_BISECT_CHECK (optional) +# Set this to 1 if you want to confirm that the config ktest +# generates (the bad config with the min config) is still bad. +# It may be that the min config fixes what broke the bad config +# and the test will not return a result. +# # Example: # TEST_START # TEST_TYPE = config_bisect -- cgit v1.2.3 From cf79fab676b3aa3b5fbae95aab25e2d4e26e4224 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 15:29:43 -0400 Subject: ktest: Fix config bisect with how make oldnoconfig works With a name like 'oldnoconfig' one may think that the config generated would disable all configs that were not defined (selecting "no" for all options). But this is not the case. It selects the default. If a config has a 'default y', then it is added if not specified. This broke the config bisect, because options not specified by a config will just use the default, where it expected to turn off. This caused an option to be enabled that disabled an option that would break the build. The end result was that we never found the bad config at the end of the test. Instead of using 'make oldnoconfig', ktest now builds the options it expects enabled and disabled. When it turns off an option, it will no longer remove it, but actually set it to: # CONFIG_FOO is not set. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 46 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 8ce58d715aef..5ad891a08113 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -189,6 +189,9 @@ my $newconfig = 0; my %entered_configs; my %config_help; my %variable; + +# force_config is the list of configs that we force enabled (or disabled) +# in a .config file. The MIN_CONFIG and ADD_CONFIG configs. 
my %force_config; # do not force reboots on config problems @@ -1837,6 +1840,7 @@ sub make_oldconfig { sub load_force_config { my ($config) = @_; + doprint "Loading force configs from $config\n"; open(IN, $config) or dodie "failed to read $config"; while () { @@ -2389,9 +2393,24 @@ sub bisect { success $i; } +# config_ignore holds the configs that were set (or unset) for +# a good config and we will ignore these configs for the rest +# of a config bisect. These configs stay as they were. my %config_ignore; + +# config_set holds what all configs were set as. my %config_set; +# config_off holds the set of configs that the bad config had disabled. +# We need to record them and set them in the .config when running +# oldnoconfig, because oldnoconfig does not turn off new symbols, but +# instead just keeps the defaults. +my %config_off; + +# config_off_tmp holds a set of configs to turn off for now +my @config_off_tmp; + +# config_list is the set of configs that are being tested my %config_list; my %null_config; @@ -2470,6 +2489,16 @@ sub create_config { } } + # turn off configs to keep off + foreach my $config (keys %config_off) { + print OUT "# $config is not set\n"; + } + + # turn off configs that should be off for now + foreach my $config (@config_off_tmp) { + print OUT "# $config is not set\n"; + } + foreach my $config (keys %config_ignore) { print OUT "$config_ignore{$config}\n"; } @@ -2551,6 +2580,13 @@ sub run_config_bisect { do { my @tophalf = @start_list[0 .. $half]; + # keep the bottom half off + if ($half < $#start_list) { + @config_off_tmp = @start_list[$half + 1 .. $#start_list]; + } else { + @config_off_tmp = (); + } + create_config @tophalf; read_current_config \%current_config; @@ -2567,7 +2603,11 @@ sub run_config_bisect { if (!$found) { # try the other half doprint "Top half produced no set configs, trying bottom half\n"; + + # keep the top half off + @config_off_tmp = @tophalf; @tophalf = @start_list[$half + 1 .. $#start_list]; + create_config @tophalf; read_current_config \%current_config; foreach my $config (@tophalf) { @@ -2705,6 +2745,10 @@ sub config_bisect { $added_configs{$2} = $1; $config_list{$2} = $1; } + } elsif (/^# ((CONFIG\S*).*)/) { + # Keep these configs disabled + $config_set{$2} = $1; + $config_off{$2} = $1; } } close(IN); @@ -2727,6 +2771,8 @@ sub config_bisect { my %config_test; my $once = 0; + @config_off_tmp = (); + # Sometimes kconfig does weird things. We must make sure # that the config we autocreate has everything we need # to test, otherwise we may miss testing configs, or -- cgit v1.2.3 From 407b95b7a085b5c1622033edc2720bb05f973317 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 16:05:42 -0400 Subject: ktest: Add MAX_MONITOR_WAIT option If the console is constantly outputting content, this can cause ktest to get stuck waiting on the monitor to settle down. The option MAX_MONITOR_WAIT is the maximum time (in seconds) for ktest to wait for the console to flush. 
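For example, to give up flushing the console after five minutes instead of the default 30 minutes:

  # illustrative value; the default is 1800 seconds
  MAX_MONITOR_WAIT = 300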
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 16 +++++++++++++++- tools/testing/ktest/sample.conf | 8 ++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 5ad891a08113..d10fff12bc24 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -52,6 +52,7 @@ my %default = ( "STOP_AFTER_SUCCESS" => 10, "STOP_AFTER_FAILURE" => 60, "STOP_TEST_AFTER" => 600, + "MAX_MONITOR_WAIT" => 1800, # required, and we will ask users if they don't have them but we keep the default # value something that is common. @@ -98,6 +99,7 @@ my $reboot_on_success; my $die_on_failure; my $powercycle_after_reboot; my $poweroff_after_halt; +my $max_monitor_wait; my $ssh_exec; my $scp_to_target; my $scp_to_target_install; @@ -243,6 +245,7 @@ my %option_map = ( "POWER_OFF" => \$power_off, "POWERCYCLE_AFTER_REBOOT" => \$powercycle_after_reboot, "POWEROFF_AFTER_HALT" => \$poweroff_after_halt, + "MAX_MONITOR_WAIT" => \$max_monitor_wait, "SLEEP_TIME" => \$sleep_time, "BISECT_SLEEP_TIME" => \$bisect_sleep_time, "PATCHCHECK_SLEEP_TIME" => \$patchcheck_sleep_time, @@ -1133,7 +1136,10 @@ sub reboot { } if (defined($time)) { - wait_for_monitor($time, $reboot_success_line); + if (wait_for_monitor($time, $reboot_success_line)) { + # reboot got stuck? + run_command "$power_cycle"; + } end_monitor; } } @@ -1228,6 +1234,8 @@ sub wait_for_monitor { my $full_line = ""; my $line; my $booted = 0; + my $start_time = time; + my $now; doprint "** Wait for monitor to settle down **\n"; @@ -1246,8 +1254,14 @@ sub wait_for_monitor { if ($line =~ /\n/) { $full_line = ""; } + $now = time; + if ($now - $start_time >= $max_monitor_wait) { + doprint "Exiting monitor flush due to hitting MAX_MONITOR_WAIT\n"; + return 1; + } } print "** Monitor flushed **\n"; + return 0; } sub save_logs { diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf index e4a12da75ba6..de28a0a3b8fc 100644 --- a/tools/testing/ktest/sample.conf +++ b/tools/testing/ktest/sample.conf @@ -695,6 +695,14 @@ # (default 60) #BISECT_SLEEP_TIME = 60 +# The max wait time (in seconds) for waiting for the console to finish. +# If for some reason, the console is outputting content without +# ever finishing, this will cause ktest to get stuck. This +# option is the max time ktest will wait for the monitor (console) +# to settle down before continuing. +# (default 1800) +#MAX_MONITOR_WAIT + # The time in between patch checks to sleep (in seconds) # (default 60) #PATCHCHECK_SLEEP_TIME = 60 -- cgit v1.2.3 From 8a80c72711a9b78af433013067848c0a5473a484 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 19 Jul 2012 16:08:33 -0400 Subject: ktest: Add check for bug or panic during reboot Usually the target is booted into a dependable kernel when a test starts. The test will install the test kernel and reboot the box. But there may be a time that the kernel is running an unreliable kernel and the reboot may crash. Have ktest detect crashes on a reboot and force a power-cycle instead. This can usually happen if a test kernel was installed to run manual tests, but the user forgot to reboot to the known good kernel. 
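Note that the forced power cycle simply runs the configured POWER_CYCLE command, so a working power cycle method is needed for the recovery to do anything useful, for example (the script name is only illustrative):

  # illustrative example; use whatever resets your target's power
  POWER_CYCLE = ~/bin/power-cycle-target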
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index d10fff12bc24..c8a42d5991be 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1138,6 +1138,7 @@ sub reboot { if (defined($time)) { if (wait_for_monitor($time, $reboot_success_line)) { # reboot got stuck? + doprint "Reboot did not finish. Forcing power cycle\n"; run_command "$power_cycle"; } end_monitor; @@ -1235,6 +1236,9 @@ sub wait_for_monitor { my $line; my $booted = 0; my $start_time = time; + my $skip_call_trace = 0; + my $bug = 0; + my $bug_ignored = 0; my $now; doprint "** Wait for monitor to settle down **\n"; @@ -1251,6 +1255,28 @@ sub wait_for_monitor { $booted = 1; } + if ($full_line =~ /\[ backtrace testing \]/) { + $skip_call_trace = 1; + } + + if ($full_line =~ /call trace:/i) { + if (!$bug && !$skip_call_trace) { + if ($ignore_errors) { + $bug_ignored = 1; + } else { + $bug = 1; + } + } + } + + if ($full_line =~ /\[ end of backtrace testing \]/) { + $skip_call_trace = 0; + } + + if ($full_line =~ /Kernel panic -/) { + $bug = 1; + } + if ($line =~ /\n/) { $full_line = ""; } @@ -1261,7 +1287,7 @@ sub wait_for_monitor { } } print "** Monitor flushed **\n"; - return 0; + return $bug; } sub save_logs { -- cgit v1.2.3 From c1434dcc57f97b0e533dedb8814a76ef13e702b4 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 20 Jul 2012 22:39:16 -0400 Subject: ktest: Reset saved min (force) configs for each test The min configs are saved in a perl hash called force_config, and this hash is used to add configs to the .config file. But it was not being reset between tests and a min config from a previous test would affect the min config of the next test causing undesirable results. Reset the force_config hash at the start of each test. Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index c8a42d5991be..c444c4fcc8c4 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -3634,6 +3634,8 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { $iteration = $i; + undef %force_config; + my $makecmd = set_test_option("MAKE_CMD", $i); # Load all the options into their mapped variable names -- cgit v1.2.3 From 9b1d367dbbeb6646f04a8865ecc2bc454f7dd88f Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 30 Jul 2012 14:30:53 -0400 Subject: ktest: Ignore errors in tests if IGNORE_ERRORS is set The option IGNORE_ERRORS is used to allow a test to succeed even if a warning appears from the kernel. Sometimes kernels will produce warnings that are not associated with a test, and the user wants to test something else. IGNORE_ERRORS works for boot up, but it did not allow a test run to succeed if the kernel produced a warning.
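For example, a test that is known to trip an unrelated kernel warning can still be allowed to pass (the test command is only illustrative):

  TEST_START
  TEST_TYPE = test
  # illustrative test command
  TEST = ssh user@target ./run-stress-test
  IGNORE_ERRORS = 1

When a call trace is seen but ignored this way, ktest prints a warning noting that it was skipped because of IGNORE_ERRORS.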
Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index c444c4fcc8c4..a022fb7d113d 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -2074,6 +2074,7 @@ sub do_run_test { my $line; my $full_line; my $bug = 0; + my $bug_ignored = 0; wait_for_monitor 1; @@ -2098,7 +2099,11 @@ sub do_run_test { doprint $line; if ($full_line =~ /call trace:/i) { - $bug = 1; + if ($ignore_errors) { + $bug_ignored = 1; + } else { + $bug = 1; + } } if ($full_line =~ /Kernel panic -/) { @@ -2111,6 +2116,10 @@ sub do_run_test { } } while (!$child_done && !$bug); + if (!$bug && $bug_ignored) { + doprint "WARNING: Call Trace detected but ignored due to IGNORE_ERRORS=1\n"; + } + if ($bug) { my $failure_start = time; my $now; -- cgit v1.2.3 From 8fddbe9bbfe5771a9d9e5d0c6f5bae3213c20645 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 30 Jul 2012 14:37:01 -0400 Subject: ktest: Allow perl regex expressions in conditional statements Add '=~' and '!~' to the list of allowed conditionals for DEFAULT and TEST_START section if statements. ie. TEST_START IF TEST =~ .*test$ Signed-off-by: Steven Rostedt --- tools/testing/ktest/ktest.pl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index a022fb7d113d..52b7959cd513 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -623,6 +623,10 @@ sub process_compare { return $lval eq $rval; } elsif ($cmp eq "!=") { return $lval ne $rval; + } elsif ($cmp eq "=~") { + return $lval =~ m/$rval/; + } elsif ($cmp eq "!~") { + return $lval !~ m/$rval/; } my $statement = "$lval $cmp $rval"; @@ -678,7 +682,7 @@ sub process_expression { } } - if ($val =~ /(.*)(==|\!=|>=|<=|>|<)(.*)/) { + if ($val =~ /(.*)(==|\!=|>=|<=|>|<|=~|\!~)(.*)/) { my $ret = process_compare($1, $2, $3); if ($ret < 0) { die "$name: $.: Unable to process comparison\n"; -- cgit v1.2.3
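The same comparisons also work in DEFAULTS sections, for example (the machine name is only illustrative):

  # illustrative: apply these defaults only to matching machines
  DEFAULTS IF MACHINE =~ ^test-box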