#
# Config file for ktest.pl
#
# Note, all paths must be absolute
#
# Options set in the beginning of the file are considered to be
# default options. These options can be overridden by test specific
# options, with the following exceptions:
#
# LOG_FILE
# CLEAR_LOG
# POWEROFF_ON_SUCCESS
# REBOOT_ON_SUCCESS
#
# Test specific options are set after the label:
#
# TEST_START
#
# The options after a TEST_START label are specific to that test.
# Each TEST_START label will set up a new test. If you want to
# perform a test more than once, you can add the ITERATE label
# to it followed by the number of times you want that test
# to iterate. If the ITERATE is left off, the test will only
# be performed once.
#
# TEST_START ITERATE 10
#
# You can skip a test by adding SKIP (before or after the ITERATE
# and number)
#
# TEST_START SKIP
#
# TEST_START SKIP ITERATE 10
#
# TEST_START ITERATE 10 SKIP
#
# The SKIP label causes the options and the test itself to be ignored.
# This is useful to set up several different tests in one config file, and
# enable only the ones you want to use for the current test run.
#
# You can add default options anywhere in the file as well
# with the DEFAULTS tag. This allows you to have default options
# after the test options to keep the test options at the top
# of the file. You can even place the DEFAULTS tag between
# test cases (but not in the middle of a single test case)
#
# TEST_START
# MIN_CONFIG = /home/test/config-test1
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-default
#
# TEST_START ITERATE 10
#
# The above will run the first test with MIN_CONFIG set to
# /home/test/config-test1. Then 10 tests will be executed
# with MIN_CONFIG set to /home/test/config-default.
#
# You can also disable defaults with the SKIP option
#
# DEFAULTS SKIP
# MIN_CONFIG = /home/test/config-use-sometimes
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-most-times
#
# The above will ignore the first MIN_CONFIG. If you want to
# use the first MIN_CONFIG, remove the SKIP from the first
# DEFAULTS tag and add it to the second. Be careful, options
# may only be declared once per test or defaults section. If the
# same option name appears more than once within the same test or
# defaults section, ktest will fail to execute, and no tests will run.
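#
# For example, the following defaults section would cause ktest to
# abort, because MIN_CONFIG is declared twice in the same section
# (the paths here are only placeholders):
#
# DEFAULTS
# MIN_CONFIG = /home/test/config-default
# MIN_CONFIG = /home/test/config-other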
#
#### Config variables ####
#
# This config file can also contain "config variables".
# These are assigned with ":=" instead of the ktest option
# assignment "=".
#
# The difference between ktest options and config variables
# is that config variables can be used multiple times,
# where each instance will override the previous instance.
# Also, they only exist while this config file is being processed.
#
# The advantage of config variables is that they can be used
# by any option or any other config variable to define things
# that you may use over and over again in the options.
#
# For example:
#
# USER := root
# TARGET := mybox
# TEST_CASE := ssh ${USER}@${TARGET} /path/to/my/test
#
# TEST_START
# MIN_CONFIG = config1
# TEST = ${TEST_CASE}
#
# TEST_START
# MIN_CONFIG = config2
# TEST = ${TEST_CASE}
#
# TEST_CASE := ssh ${USER}@${TARGET} /path/to/my/test2
#
# TEST_START
# MIN_CONFIG = config1
# TEST = ${TEST_CASE}
#
# TEST_START
# MIN_CONFIG = config2
# TEST = ${TEST_CASE}
#
# TEST_DIR := /home/me/test
#
# BUILD_DIR = ${TEST_DIR}/linux.git
# OUTPUT_DIR = ${TEST_DIR}/test
#
# Note, the config variables are evaluated immediately, thus
# updating TARGET after TEST_CASE has been assigned does nothing
# to TEST_CASE.
#
# As shown in the example, to evaluate a config variable, you
# use the ${X} convention. Simple $X will not work.
#
# If the config variable does not exist, the ${X} will not
# be evaluated. Thus:
#
# MAKE_CMD = PATH=/mypath:${PATH} make
#
# If PATH is not a config variable, then the ${PATH} in
# the MAKE_CMD option will be evaluated by the shell when
# the MAKE_CMD option is passed into shell processing.
#### Using options in other options ####
#
# Options that are defined in the config file may also be used
# by other options. All options are evaluated at time of
# use (except that config variables are evaluated at config
# processing time).
#
# If a ktest option is used within another option, instead of
# typing it again in that option you can simply use the option,
# just like you can with config variables.
#
# MACHINE = mybox
#
# TEST = ssh root@${MACHINE} /path/to/test
#
# The option will be used per test case. Thus:
#
# TEST_TYPE = test
# TEST = ssh root@${MACHINE}
#
# TEST_START
# MACHINE = box1
#
# TEST_START
# MACHINE = box2
#
# For both test cases, MACHINE will be evaluated at the time
# of the test case. The first test will run ssh root@box1
# and the second will run ssh root@box2.
#### Mandatory Default Options ####
# These options must be in the default section, although most
# may be overridden by test options.
# The machine hostname that you will test
#MACHINE = target
# The box is expected to have ssh on normal bootup, provide the user
# (most likely root, since you need privileged operations)
#SSH_USER = root
# The directory that contains the Linux source code
#BUILD_DIR = /home/test/linux.git
# The directory where the objects will be built
# (cannot be the same as BUILD_DIR)
#OUTPUT_DIR = /home/test/build/target
# The location of the compiled file to copy to the target
# (relative to OUTPUT_DIR)
#BUILD_TARGET = arch/x86/boot/bzImage
# The place to put your image on the test machine
#TARGET_IMAGE = /boot/vmlinuz-test
# A script or command to reboot the box
#
# Here is a digital loggers power switch example
#POWER_CYCLE = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
#
# Here is an example to reboot a virtual box on the current host
# with the name "Guest".
#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
# The script or command that reads the console
#
# If you use ttywatch server, something like the following would work.
#CONSOLE = nc -d localhost 3001
#
# For a virtual machine with guest name "Guest".
#CONSOLE = virsh console Guest
# Required version ending to differentiate the test
# from other Linux builds on the system.
#LOCALVERSION = -test
# The grub title name for the test kernel to boot
# (Only mandatory if REBOOT_TYPE = grub)
#
# Note, ktest.pl will not update the grub menu.lst; you need to
# manually add an option for the test. ktest.pl will search
# the grub menu.lst for this option to find what kernel to
# reboot into.
#
# For example, if in the /boot/grub/menu.lst the test kernel title has:
# title Test Kernel
# kernel vmlinuz-test
#GRUB_MENU = Test Kernel
# A script to reboot the target into the test kernel
# (Only mandatory if REBOOT_TYPE = script)
#REBOOT_SCRIPT =
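#
# As a rough sketch, a minimal defaults section combining the mandatory
# options above might look like the following (the hostname, user, paths
# and grub title are placeholders and must match your own setup):
#
# MACHINE = target
# SSH_USER = root
# BUILD_DIR = /home/test/linux.git
# OUTPUT_DIR = /home/test/build/target
# BUILD_TARGET = arch/x86/boot/bzImage
# TARGET_IMAGE = /boot/vmlinuz-test
# POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
# CONSOLE = virsh console Guest
# LOCALVERSION = -test
# GRUB_MENU = Test Kernel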
#### Optional Config Options (all have defaults) ####
# Start a test setup. If you leave this off, all options
# will be default and the test will run once.
# This is a label and not really an option (it takes no value).
# You can append ITERATE and a number after it to iterate the
# test a number of times, or SKIP to ignore this test.
#
#TEST_START
#TEST_START ITERATE 5
#TEST_START SKIP
# Have the following options as default again. Used after tests
# have already been defined by TEST_START. Optionally, you can
# just define all default options before the first TEST_START
# and you do not need this option.
#
# This is a label and not really an option (it takes no value).
# You can append SKIP to this label and the options within this
# section will be ignored.
#
# DEFAULTS
# DEFAULTS SKIP
# The default test type (default test)
# The test types may be:
# build - only build the kernel, do nothing else
# boot - build and boot the kernel
# test - build, boot and if TEST is set, run the test script
# (If TEST is not set, it defaults back to boot)
# bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
# patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
#TEST_TYPE = test
# Test to run if there is a successful boot and TEST_TYPE is test.
# Must exit with 0 on success and non-zero on error
# default (undefined)
#TEST = ssh user@machine /root/run_test
# The build type is any make config type or special command
# (default randconfig)
# nobuild - skip the clean and build step
# useconfig:/path/to/config - use the given config and run
# oldconfig on it.
# This option is ignored if TEST_TYPE is patchcheck or bisect
#BUILD_TYPE = randconfig
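#
# For example, to build from an existing config instead of a randconfig
# (the path below is only a placeholder):
#
#BUILD_TYPE = useconfig:/home/test/config-general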
# The make command (default make)
# If you are building a 32-bit x86 kernel on a 64-bit host
#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
# Any build options for the make of the kernel (not for other makes, like configs)
# (default "")
#BUILD_OPTIONS = -j20
# If you need an initrd, you can add a script or code here to install
# it. The environment variable KERNEL_VERSION will be set to the
# kernel version that is used. Remember to add the initrd line
# to your grub menu.lst file.
#
# Here are a couple of examples to use:
#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
#
# or on some systems:
#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
# Way to reboot the box to the test kernel.
# Only valid options so far are "grub" and "script"
# (default grub)
# If you specify grub, it will assume grub version 1
# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
# and select that target to reboot to the kernel. If this is not
# your setup, then specify "script" and have a command or script
# specified in REBOOT_SCRIPT to boot to the target.
#
# The entry in /boot/grub/menu.lst must be entered in manually.
# The test will not modify that file.
#REBOOT_TYPE = grub
# The min config that is needed to build for the machine
# A nice way to create this is with the following:
#
# $ ssh target
# $ lsmod > mymods
# $ scp mymods host:/tmp
# $ exit
# $ cd linux.git
# $ rm .config
# $ make LSMOD=mymods localyesconfig
# $ grep '^CONFIG' .config > /home/test/config-min
#
# If you want even fewer configs:
#
# log in directly to target (do not ssh)
#
# $ su
# # lsmod | cut -d' ' -f1 | xargs rmmod
#
# repeat the above several times
#
# # lsmod > mymods
# # reboot
#
# You may need to reboot to get your network back in order to copy
# the mymods file to the host, then remove the previous .config and
# run localyesconfig again. The MIN_CONFIG generated like this will
# not guarantee network activity to the box, so a TEST_TYPE of
# test may fail.
#
# You might also want to set:
# CONFIG_CMDLINE="<your options here>"
# randconfig may set the above and override your real command
# line options.
# (default undefined)
#MIN_CONFIG = /home/test/config-min
# Sometimes there are options that just break the boot and
# that you do not care about. Here are a few:
# # CONFIG_STAGING is not set
# Staging drivers are horrible, and can break the build.
# # CONFIG_SCSI_DEBUG is not set
# SCSI_DEBUG may change your root partition
# # CONFIG_KGDB_SERIAL_CONSOLE is not set
# KGDB may cause oops waiting for a connection that's not there.
# This option points to the file containing config options that will be prepended
# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
#
# Note, config options in MIN_CONFIG will override these options.
#
# (default undefined)
#ADD_CONFIG = /home/test/config-broken
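#
# As a sketch, the file pointed to by ADD_CONFIG could simply contain
# the options listed above:
#
# # CONFIG_STAGING is not set
# # CONFIG_SCSI_DEBUG is not set
# # CONFIG_KGDB_SERIAL_CONSOLE is not set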
# The location on the host where to write temp files
# (default /tmp/ktest)
#TMP_DIR = /tmp/ktest
# Optional log file to write the status (recommended)
# Note, this is a DEFAULT section only option.
# (default undefined)
#LOG_FILE = /home/test/logfiles/target.log
# Remove old logfile if it exists before starting all tests.
# Note, this is a DEFAULT section only option.
# (default 0)
#CLEAR_LOG = 0
# Line to define a successful boot up in console output.
# This is what the line contains, not the entire line. If you need
# the entire line to match, then use regular expression syntax like:
# (do not add any quotes around it)
#
# SUCCESS_LINE = ^MyBox Login:$
#
# (default "login:")
#SUCCESS_LINE = login:
# In case the console constantly fills the screen, having
# a specified time to stop the test after success is recommended.
# (in seconds)
# (default 10)
#STOP_AFTER_SUCCESS = 10
# In case the console constantly fills the screen, having
# a specified time to stop the test after failure is recommended.
# (in seconds)
# (default 60)
#STOP_AFTER_FAILURE = 60
# In case the console constantly fills the screen, having
# a specified time to stop the test if it never succeeds nor fails
# is recommended.
# Note: this is ignored if a success or failure is detected.
# (in seconds)
# (default 600, -1 is to never stop)
#STOP_TEST_AFTER = 600
# Stop testing if a build fails. If set, the script will end if
# a failure is detected, otherwise it will save off the .config,
# dmesg and bootlog in a directory called
# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
# if the STORE_FAILURES directory is set.
# (default 1)
# Note, even if this is set to zero, there are some errors that still
# stop the tests.
#DIE_ON_FAILURE = 1
# Directory to store failure directories on failure. If this is not
# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
# (default undefined)
#STORE_FAILURES = /home/test/failures
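#
# For example, to keep testing after failures but still save off the
# .config, dmesg and bootlog of each failure (the directory is only a
# placeholder):
#
#DIE_ON_FAILURE = 0
#STORE_FAILURES = /home/test/failures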
# Build without doing a make mrproper, or removing .config
# (default 0)
#BUILD_NOCLEAN = 0
# As the test reads the console, after it hits the SUCCESS_LINE
# the time it waits for the monitor to settle down between reads
# can usually be lowered.
# (in seconds) (default 1)
#BOOTED_TIMEOUT = 1
# The timeout in seconds when we consider the box hung after
# the console stops producing output. Be sure to leave enough
# time here to get past a reboot. Some machines may not produce
# any console output for a long time during a reboot. You do
# not want the test to fail just because the system was in
# the process of rebooting to the test kernel.
# (default 120)
#TIMEOUT = 120
# In between tests, a reboot of the box may occur, and this
# is the time to wait for the console after it stops producing
# output. Some machines may not produce a large lag on reboot
# so this should accommodate it.
# The difference between this and TIMEOUT, is that TIMEOUT happens
# when rebooting to the test kernel. This sleep time happens
# after a test has completed and we are about to start running
# another test. If a reboot to the reliable kernel happens,
# we wait SLEEP_TIME for the console to stop producing output
# before starting the next test.
# (default 60)
#SLEEP_TIME = 60
# The time in between bisects to sleep (in seconds)
# (default 60)
#BISECT_SLEEP_TIME = 60
# The time in between patch checks to sleep (in seconds)
# (default 60)
#PATCHCHECK_SLEEP_TIME = 60
# Reboot the target box on error (default 0)
#REBOOT_ON_ERROR = 0
# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
# Note, this is a DEFAULT section only option.
# (default 0)
#POWEROFF_ON_ERROR = 0
# Power off the target after all tests have completed successfully
# Note, this is a DEFAULT section only option.
# (default 0)
#POWEROFF_ON_SUCCESS = 0
# Reboot the target after all test completed successfully (default 1)
# (ignored if POWEROFF_ON_SUCCESS is set)
#REBOOT_ON_SUCCESS = 1
# In case there are issues with rebooting, you can specify this
# to always powercycle after this amount of time after calling
# reboot.
# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
# makes it powercycle immediately after rebooting. Do not define
# it if you do not want it.
# (default undefined)
#POWERCYCLE_AFTER_REBOOT = 5
# In case there are issues with halting, you can specify this
# to always poweroff after this amount of time after calling
# halt.
# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
# makes it poweroff immediately after halting. Do not define
# it if you do not want it.
# (default undefined)
#POWEROFF_AFTER_HALT = 20
# A script or command to power off the box (default undefined)
# Needed for POWEROFF_ON_ERROR and POWEROFF_ON_SUCCESS
#
# Example for digital loggers power switch:
#POWER_OFF = wget --no-proxy -O /dev/null -q --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
#
# Example for a virtual guest called "Guest".
#POWER_OFF = virsh destroy Guest
# The way to execute a command on the target
# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND)
# The variables SSH_USER, MACHINE and SSH_COMMAND are defined.
#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
# The way to copy a file to the target
# (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE)
# The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined.
#SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE
# The nice way to reboot the target
# (default ssh $SSH_USER@$MACHINE reboot)
# The variables SSH_USER and MACHINE are defined.
#REBOOT = ssh $SSH_USER@$MACHINE reboot
# The way triple faults are detected is by testing the kernel
# banner. If the kernel banner for the kernel we are testing is
# found, and then later a kernel banner for another kernel version
# is found, it is considered that we encountered a triple fault,
# and there is no panic or callback, but simply a reboot.
# To disable this (because it gave a false positive) set the following
# to 0.
# (default 1)
#DETECT_TRIPLE_FAULT = 0
#### Per test run options ####
# The following options are only allowed in TEST_START sections.
# They are ignored in the DEFAULTS sections.
#
# All of these are optional and undefined by default, although
# some of these options are required for TEST_TYPE of patchcheck
# and bisect.
#
#
# CHECKOUT = branch
#
# If the BUILD_DIR is a git repository, then you can set this option
# to check out the given branch before running the TEST. If you
# specify this for the first run, that branch will be used for
# all following tests until a new CHECKOUT is set.
#
#
# TEST_NAME = name
#
# If you want the test to have a name that is displayed in
# the test result banner at the end of the test, then use this
# option. This is useful to search for the RESULT keyword and
# not have to translate a test number to a test in the config.
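#
# For example, a test that checks out a branch and labels its result
# (the branch name, test command and machine are placeholders):
#
# TEST_START
# TEST_TYPE = test
# CHECKOUT = mybranch
# TEST_NAME = boot-mybranch
# TEST = ssh root@${MACHINE} /path/to/test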
#
# For TEST_TYPE = patchcheck
#
# This expects the BUILD_DIR to be a git repository, and
# will checkout the PATCHCHECK_START commit.
#
# The option BUILD_TYPE will be ignored.
#
# The MIN_CONFIG will be used for all builds of the patchcheck. The build type
# used for patchcheck is oldconfig.
#
# PATCHCHECK_START is required and is the first patch to
# test (the SHA1 of the commit). You may also specify anything
# that git checkout allows (branch name, tag, HEAD~3).
#
# PATCHCHECK_END is the last patch to check (default HEAD)
#
# PATCHCHECK_TYPE is required and is the type of test to run:
# build, boot, test.
#
# Note, the build test will look for warnings; if a warning occurs
# in a file that a commit touches, the build will fail.
#
# If BUILD_NOCLEAN is set, then make mrproper will not be run on
# any of the builds, just like all other TEST_TYPE tests. But
# what makes patchcheck different from the other tests is that, if
# BUILD_NOCLEAN is not set, only the first and last patch run
# make mrproper. This helps speed up the test.
#
# Example:
# TEST_START
# TEST_TYPE = patchcheck
# CHECKOUT = mybranch
# PATCHCHECK_TYPE = boot
# PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
# PATCHCHECK_END = HEAD~2
#
#
#
# For TEST_TYPE = bisect
#
# You can specify a git bisect if the BUILD_DIR is a git repository.
# The MIN_CONFIG will be used for all builds of the bisect. The build type
# used for bisecting is oldconfig.
#
# The option BUILD_TYPE will be ignored.
#
# BISECT_TYPE is the type of test to perform:
# build - bad fails to build
# boot - bad builds but fails to boot
# test - bad boots but fails a test
#
# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
#
# The above three options are required for a bisect operation.
#
# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
#
# If an operation fails in the bisect that was not expected to
# fail, then the test ends. The state of the BUILD_DIR will be
# left at where the failure occurred. You can examine the
# reason for the failure, and perhaps even find a git commit
# that would work to continue with. You can run:
#
# git bisect log > /path/to/replay/file
#
# Then add:
#
# BISECT_REPLAY= /path/to/replay/file
#
# and run the test again. The test will perform the initial
# git bisect start, git bisect good, and git bisect bad, and
# then it will run git bisect replay on this file, before
# continuing with the bisect.
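#
# For example, a bisect resumed from a replay file might look like this
# (the commits are taken from the bisect example below and the replay
# path is a placeholder):
#
# TEST_START
# TEST_TYPE = bisect
# BISECT_TYPE = boot
# BISECT_GOOD = v2.6.36
# BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
# BISECT_REPLAY = /path/to/replay/file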
#
# BISECT_START = commit (optional, default undefined)
#
# As with BISECT_REPLAY, use this if the test failed on a commit that
# just happens to be a bad commit in the middle of the bisect
# and you need to skip it. If BISECT_START is defined, it
# will checkout that commit after doing the initial git bisect start,
# git bisect good, git bisect bad, and running the git bisect replay
# if the BISECT_REPLAY is set.
#
# BISECT_SKIP = 1 (optional, default 0)
#
# If BISECT_TYPE is set to test but the build fails, ktest will
# simply fail the test and end there. You could use BISECT_REPLAY
# and BISECT_START to resume after you found a new starting point,
# or you could set BISECT_SKIP to 1. If BISECT_SKIP is set to 1,
# when something other than the BISECT_TYPE fails, ktest.pl will
# run "git bisect skip" and try again.
#
# BISECT_FILES = <path> (optional, default undefined)
#
# To just run the git bisect on a specific path, set BISECT_FILES.
# For example:
#
# BISECT_FILES = arch/x86 kernel/time
#
# This will run the bisect with "git bisect start -- arch/x86 kernel/time"
#
# BISECT_REVERSE = 1 (optional, default 0)
#
# In those strange instances where it was broken forever
# and you are trying to find where it started to work!
# Set BISECT_GOOD to the commit that was last known to fail.
# Set BISECT_BAD to the commit that is known to start working.
# With BISECT_REVERSE = 1, the test will consider failures as
# good, and successes as bad.
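#
# For example, a reverse bisect might look like this (the commit
# values are placeholders):
#
# TEST_START
# TEST_TYPE = bisect
# BISECT_TYPE = boot
# BISECT_REVERSE = 1
# BISECT_GOOD = <commit that still fails>
# BISECT_BAD = <commit that works>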
#
# BISECT_MANUAL = 1 (optional, default 0)
#
# In case there's a problem with automating the bisect for
# whatever reason (can't reboot, want to inspect each iteration),
# setting BISECT_MANUAL will have the test wait for you to
# tell it if the test passed or failed after each iteration.
# This is basically the same as running git bisect yourself,
# but ktest will rebuild and install the kernel for you.
#
# BISECT_CHECK = 1 (optional, default 0)
#
# Just to be sure the good is good and the bad is bad, setting
# BISECT_CHECK to 1 will start the bisect by first checking
# out BISECT_BAD and making sure it fails, then it will check
# out BISECT_GOOD and make sure it succeeds before starting
# the bisect (it works for BISECT_REVERSE too).
#
# You can limit the test to just check BISECT_GOOD or
# BISECT_BAD with BISECT_CHECK = good or
# BISECT_CHECK = bad, respectively.
#
# Example:
# TEST_START
# TEST_TYPE = bisect
# BISECT_GOOD = v2.6.36
# BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
# BISECT_TYPE = build
# MIN_CONFIG = /home/test/config-bisect
#
#
#
# For TEST_TYPE = config_bisect
#
# In those cases where you have two different configs, one of them
# works, the other does not, and you do not know which config causes
# the problem, the TEST_TYPE config_bisect will bisect the bad config
# looking for the config that causes the failure.
#
# The way it works is this:
#
# First it finds a config to work with. Since a different version or
# MIN_CONFIG may cause different dependencies, it must run through this
# preparation.
#
# It overwrites any config set in the bad config with a config set in
# either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
# are minimal and do not disable configs you want to test
# (i.e. # CONFIG_FOO is not set).
#
# An oldconfig is run on the bad config and any new config that
# appears will be added to the configs to test.
#
# Finally, it generates a config with the above result and runs it
# again through make oldconfig to produce a config that should be
# satisfied by kconfig.
#
# Then it starts the bisect.
#
# The configs to test are cut in half. If all the configs in this
# half depend on a config in the other half, then the other half
# is tested instead. If no configs are enabled by either half, then
# this means a circular dependency exists and the test fails.
#
# A config is created with the test half, and the bisect test is run.
#
# If the bisect succeeds, then all configs in the generated config
# are removed from the configs to test and added to the configs that
# will be enabled for all builds (they will be enabled, but not be part
# of the configs to examine).
#
# If the bisect fails, then all test configs that were not enabled by
# the config file are removed from the test. These configs will not
# be enabled in future tests. Since the current config failed, we consider
# this to be a subset of the config that we started with.
#
# When we are down to one config, it is considered the bad config.
#
# Note, the config chosen may not be the true bad config. Due to
# dependencies and selections of the kbuild system, multiple
# configs may be needed to cause a failure. If you disable the
# config that was found, restart the test, and the test fails
# again, it is recommended to rerun the config_bisect with a new
# bad config without the found config enabled.
#
# The option BUILD_TYPE will be ignored.
#
# CONFIG_BISECT_TYPE is the type of test to perform:
# build - bad fails to build
# boot - bad builds but fails to boot
# test - bad boots but fails a test
#
# CONFIG_BISECT is the config that failed to boot
#
# If BISECT_MANUAL is set, it will pause between iterations.
# This is useful if you want to use ktest.pl just for the config bisect.
# If you set it to build, it will run the bisect and you can
# control what happens in between iterations. It will ask you if
# the test succeeded or not and continue the config bisect.
#
# CONFIG_BISECT_GOOD (optional)
# If you have a good config to start with, then you
# can specify it with CONFIG_BISECT_GOOD. Otherwise
# the MIN_CONFIG is the base.
#
# Example:
# TEST_START
# TEST_TYPE = config_bisect
# CONFIG_BISECT_TYPE = build
# CONFIG_BISECT = /home/test/config-bad
# MIN_CONFIG = /home/test/config-min
# BISECT_MANUAL = 1
#