#!/usr/bin/env bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Common utility file for Bazel shell tests
#
# unittest.bash: a unit test framework in Bash.
#
# A typical test suite looks like so:
#
#   ------------------------------------------------------------------------
#   #!/usr/bin/env bash
#
#   source path/to/unittest.bash || exit 1
#
#   # Test that foo works.
#   function test_foo() {
#     foo >$TEST_log || fail "foo failed";
#     expect_log "blah" "Expected to see 'blah' in output of 'foo'."
#   }
#
#   # Test that bar works.
#   function test_bar() {
#     bar 2>$TEST_log || fail "bar failed";
#     expect_not_log "ERROR" "Unexpected error from 'bar'."
#     ...
#     assert_equals $x $y
#   }
#
#   run_suite "Test suite for blah"
#   ------------------------------------------------------------------------
#
# Each test function is considered to pass iff fail() is not called
# while it is active. fail() may be called directly, or indirectly
# via other assertions such as expect_log(). run_suite must be called
# at the very end.
#
# A test suite may redefine the functions "set_up" and/or "tear_down";
# these functions are executed before and after each test function,
# respectively. Similarly, "cleanup" and "timeout" may be redefined,
# and these functions are called upon exit (of any kind) or a timeout.
#
# The user can pass --test_arg to bazel test to select specific tests
# to run. Specifying --test_arg multiple times selects several tests
# to run, in the given order. Additionally, the user may define
# TESTS=(test_foo test_bar ...) to specify a subset of test functions to
# execute, for example, a working set during debugging. By default, all
# functions named test_* are executed.
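#
# For example (hypothetical test names and target label), a debugging run
# could restrict the suite to two tests, either from the suite itself:
#
#   TESTS=(test_foo test_bar)
#
# or from the command line:
#
#   bazel test //pkg:my_sh_test --test_arg=test_foo --test_arg=test_bar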
#
# This file provides utilities for assertions over the output of a
# command. The output of the command under test is directed to the
# file $TEST_log, and then the expect_log* assertions can be used to
# test for the presence of certain regular expressions in that file.
#
# The test framework is responsible for restoring the original working
# directory before each test.
#
# The order in which test functions are run is not defined, so it is
# important that tests clean up after themselves.
#
# Each test will be run in a new subshell.
#
# Functions named __* are not intended for use by clients.
#
# This framework implements the "test sharding protocol".
#
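# Under Bazel's sharding protocol the test runner exports TEST_TOTAL_SHARDS,
# TEST_SHARD_INDEX and TEST_SHARD_STATUS_FILE; __update_shards below uses them
# to pick the subset of tests belonging to the current shard. An illustrative
# manual invocation (hypothetical script name) would be:
#
#   TEST_TOTAL_SHARDS=3 TEST_SHARD_INDEX=1 bash my_test.sh
#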

[ -n "$BASH_VERSION" ] ||
  { echo "unittest.bash only works with bash!" >&2; exit 1; }

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

#### Configuration variables (may be overridden by testenv.sh or the suite):

# This function may be called by testenv.sh or a test suite to enable errexit
# in a way that enables us to print pretty stack traces when something fails.
function enable_errexit() {
  set -o errtrace
  set -eu
  trap __test_terminated_err ERR
}

function disable_errexit() {
  set +o errtrace
  set +eu
  trap - ERR
}

#### Set up the test environment, branched from the old shell/testenv.sh

# Enable errexit with pretty stack traces.
enable_errexit

# Print message in "$1" then exit with status "$2"
die () {
  # second argument is optional, defaulting to 1
  local status_code=${2:-1}
  # Stop capturing stdout/stderr, and dump captured output
  if [ "$CAPTURED_STD_ERR" -ne 0 -o "$CAPTURED_STD_OUT" -ne 0 ]; then
    restore_outputs
    if [ "$CAPTURED_STD_OUT" -ne 0 ]; then
      cat "${TEST_TMPDIR}/captured.out"
      CAPTURED_STD_OUT=0
    fi
    if [ "$CAPTURED_STD_ERR" -ne 0 ]; then
      cat "${TEST_TMPDIR}/captured.err" 1>&2
      CAPTURED_STD_ERR=0
    fi
  fi

  if [ -n "${1-}" ] ; then
    echo "$1" 1>&2
  fi
  if [ -n "${BASH-}" ]; then
    local caller_n=0
    while [ $caller_n -lt 4 ] && caller_out=$(caller $caller_n 2>/dev/null); do
      test $caller_n -eq 0 && echo "CALLER stack (max 4):"
      echo "  $caller_out"
      let caller_n=caller_n+1
    done 1>&2
  fi
  if [ x"$status_code" != x -a x"$status_code" != x"0" ]; then
    exit "$status_code"
  else
    exit 1
  fi
}

# Print message in "$1" then record that a non-fatal error occurred in ERROR_COUNT
ERROR_COUNT="${ERROR_COUNT:-0}"
error () {
  if [ -n "$1" ] ; then
    echo "$1" 1>&2
  fi
  ERROR_COUNT=$(($ERROR_COUNT + 1))
}

# Die if "$1" != "$2", print $3 as death reason
check_eq () {
  [ "$1" = "$2" ] || die "Check failed: '$1' == '$2' ${3:+ ($3)}"
}

# Die if "$1" == "$2", print $3 as death reason
check_ne () {
  [ "$1" != "$2" ] || die "Check failed: '$1' != '$2' ${3:+ ($3)}"
}

# The structure of the following if statements is such that if '[' fails
# (e.g., a non-number was passed in) then the check will fail.

# Die if "$1" > "$2", print $3 as death reason
check_le () {
  [ "$1" -le "$2" ] || die "Check failed: '$1' <= '$2' ${3:+ ($3)}"
}

# Die if "$1" >= "$2", print $3 as death reason
check_lt () {
  [ "$1" -lt "$2" ] || die "Check failed: '$1' < '$2' ${3:+ ($3)}"
}

# Die if "$1" < "$2", print $3 as death reason
check_ge () {
  [ "$1" -ge "$2" ] || die "Check failed: '$1' >= '$2' ${3:+ ($3)}"
}

# Die if "$1" <= "$2", print $3 as death reason
check_gt () {
  [ "$1" -gt "$2" ] || die "Check failed: '$1' > '$2' ${3:+ ($3)}"
}

# Die if $2 !~ $1; print $3 as death reason
check_match ()
{
  expr match "$2" "$1" >/dev/null || \
    die "Check failed: '$2' does not match regex '$1' ${3:+ ($3)}"
}
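
# Illustrative use of check_match (hypothetical values): `expr match` anchors
# the pattern at the start of the string, so the first call below passes and
# the second dies:
#
#   check_match "foo.*" "foobar" "value should start with foo"   # passes
#   check_match "bar"   "foobar" "value should start with bar"   # dies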

# Run command "$1" at exit. Like "trap" but multiple atexits don't
# overwrite each other. Will break if someone does call trap
# directly. So, don't do that.
ATEXIT="${ATEXIT-}"
atexit () {
  if [ -z "$ATEXIT" ]; then
    ATEXIT="$1"
  else
    ATEXIT="$1 ; $ATEXIT"
  fi
  trap "$ATEXIT" EXIT
}
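
# Because each new command is prepended to $ATEXIT, handlers run in reverse
# registration order. A sketch of typical use (hypothetical paths):
#
#   scratch="$(mktemp -d)"
#   atexit "rm -rf '$scratch'"      # runs last
#   atexit "echo 'cleaning up...'"  # runs first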

## TEST_TMPDIR
if [ -z "${TEST_TMPDIR:-}" ]; then
  export TEST_TMPDIR="$(mktemp -d ${TMPDIR:-/tmp}/bazel-test.XXXXXXXX)"
fi
if [ ! -e "${TEST_TMPDIR}" ]; then
  mkdir -p -m 0700 "${TEST_TMPDIR}"
  # Clean TEST_TMPDIR on exit
  atexit "rm -fr ${TEST_TMPDIR}"
fi

# Functions to compare the actual output of a test to the expected
# (golden) output.
#
# Usage:
#   capture_test_stdout
#   ... do something ...
#   diff_test_stdout "$TEST_SRCDIR/path/to/golden.out"

# Redirect a file descriptor to a file.
CAPTURED_STD_OUT="${CAPTURED_STD_OUT:-0}"
CAPTURED_STD_ERR="${CAPTURED_STD_ERR:-0}"

capture_test_stdout () {
  exec 3>&1 # Save stdout as fd 3
  exec 4>"${TEST_TMPDIR}/captured.out"
  exec 1>&4
  CAPTURED_STD_OUT=1
}

capture_test_stderr () {
  exec 6>&2 # Save stderr as fd 6
  exec 7>"${TEST_TMPDIR}/captured.err"
  exec 2>&7
  CAPTURED_STD_ERR=1
}

# Force XML_OUTPUT_FILE to an existing path
if [ -z "${XML_OUTPUT_FILE:-}" ]; then
  XML_OUTPUT_FILE=${TEST_TMPDIR}/output.xml
fi

#### Global variables:

TEST_name=""                    # The name of the current test.

TEST_log=$TEST_TMPDIR/log       # The log file over which the
                                # expect_log* assertions work. Must
                                # be absolute to be robust against
                                # tests invoking 'cd'!

TEST_passed="true"              # The result of the current test;
                                # failed assertions cause this to
                                # become false.

# These variables may be overridden by the test suite:

TESTS=()                        # A subset or "working set" of test
                                # functions that should be run. By
                                # default, all tests called test_* are
                                # run.
if [ $# -gt 0 ]; then
  # Legacy behavior is to ignore missing regexp, but with errexit
  # the following line fails without || true.
  # TODO(dmarting): maybe we should revisit the way of selecting
  # test with that framework (use Bazel's environment variable instead).
  TESTS=($(for i in $@; do echo $i; done | grep ^test_ || true))
  if (( ${#TESTS[@]} == 0 )); then
    echo "WARNING: The arguments do not specify any tests!" >&2
  fi
fi

TEST_verbose="true"             # Whether or not to be verbose. A
                                # command; "true" or "false" are
                                # acceptable. The default is: true.

TEST_script="$(pwd)/$0"         # Full path to test script

#### Internal functions

function __show_log() {
  echo "-- Test log: -----------------------------------------------------------"
  [[ -e $TEST_log ]] && cat $TEST_log || echo "(Log file did not exist.)"
  echo "------------------------------------------------------------------------"
}

# Usage: __pad <title> <pad-char>
# Print $title padded to 80 columns with $pad_char.
function __pad() {
  local title=$1
  local pad=$2
  {
    echo -n "$pad$pad $title "
    printf "%80s" " " | tr ' ' "$pad"
  } | head -c 80
  echo
}

#### Exported functions

# Usage: init_test ...
# Deprecated. Has no effect.
function init_test() {
  :
}


# Usage: set_up
# Called before every test function. May be redefined by the test suite.
function set_up() {
  :
}

# Usage: tear_down
# Called after every test function. May be redefined by the test suite.
function tear_down() {
  :
}

# Usage: cleanup
# Called upon eventual exit of the test suite. May be redefined by
# the test suite.
function cleanup() {
  :
}

# Usage: timeout
# Called upon early exit from a test due to timeout.
function timeout() {
  :
}
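
# For instance (a purely hypothetical suite), set_up might create and enter a
# scratch directory under $TEST_TMPDIR before every test, and tear_down might
# remove it afterwards:
#
#   function set_up() {
#     mkdir -p "$TEST_TMPDIR/workspace" && cd "$TEST_TMPDIR/workspace"
#   }
#
#   function tear_down() {
#     cd "$TEST_TMPDIR" && rm -rf "$TEST_TMPDIR/workspace"
#   }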

# Usage: fail <message> [<message> ...]
# Print failure message with context information, and mark the test as
# a failure. The context includes a stacktrace including the longest sequence
# of calls outside this module. (We exclude the top and bottom portions of
# the stack because they just add noise.) Also prints the contents of
# $TEST_log.
function fail() {
  __show_log >&2
  echo "$TEST_name FAILED:" "$@" "." >&2
  echo "$@" >$TEST_TMPDIR/__fail
  TEST_passed="false"
  __show_stack
  # Cleanup as we are leaving the subshell now
  tear_down
  exit 1
}

# Usage: warn <message>
# Print a test warning with context information.
# The context includes a stacktrace including the longest sequence
# of calls outside this module. (We exclude the top and bottom portions of
# the stack because they just add noise.)
function warn() {
  __show_log >&2
  echo "$TEST_name WARNING: $1." >&2
  __show_stack

  if [ -n "${TEST_WARNINGS_OUTPUT_FILE:-}" ]; then
    echo "$TEST_name WARNING: $1." >> "$TEST_WARNINGS_OUTPUT_FILE"
  fi
}

# Usage: __show_stack
# Prints the portion of the stack that does not belong to this module,
# i.e. the user's code that called a failing assertion. Stack may not
# be available if Bash is reading commands from stdin; an error is
# printed in that case.
__show_stack() {
  local i=0
  local trace_found=0

  # Skip over active calls within this module:
  while (( i < ${#FUNCNAME[@]} )) && [[ ${BASH_SOURCE[i]:-} == ${BASH_SOURCE[0]} ]]; do
    (( ++i ))
  done

  # Show all calls until the next one within this module (typically run_suite):
  while (( i < ${#FUNCNAME[@]} )) && [[ ${BASH_SOURCE[i]:-} != ${BASH_SOURCE[0]} ]]; do
    # See the bash documentation for BASH_LINENO to understand the one-frame
    # offset. BASH_SOURCE entries can be unset when a subshell exits, hence
    # the "Unknown" fallbacks below.
    echo "${BASH_SOURCE[i]:-"Unknown"}:${BASH_LINENO[i - 1]:-"Unknown"}: in call to ${FUNCNAME[i]:-"Unknown"}" >&2
    (( ++i ))
    trace_found=1
  done

  [ $trace_found = 1 ] || echo "[Stack trace not available]" >&2
}

# Usage: expect_log <regexp> [error-message]
# Asserts that $TEST_log matches regexp. Prints the contents of
# $TEST_log and the specified (optional) error message otherwise, and
# returns non-zero.
function expect_log() {
  local pattern=$1
  local message=${2:-Expected regexp "$pattern" not found}
  grep -sq -- "$pattern" $TEST_log && return 0

  fail "$message"
  return 1
}

# Usage: expect_log_warn <regexp> [error-message]
# Warns if $TEST_log does not match regexp. Prints the contents of
# $TEST_log and the specified (optional) error message on mismatch.
function expect_log_warn() {
  local pattern=$1
  local message=${2:-Expected regexp "$pattern" not found}
  grep -sq -- "$pattern" $TEST_log && return 0

  warn "$message"
  return 1
}

# Usage: expect_log_once <regexp> [error-message]
# Asserts that $TEST_log contains one line matching <regexp>.
# Prints the contents of $TEST_log and the specified (optional)
# error message otherwise, and returns non-zero.
function expect_log_once() {
  local pattern=$1
  local message=${2:-Expected regexp "$pattern" not found exactly once}
  expect_log_n "$pattern" 1 "$message"
}

# Usage: expect_log_n <regexp> <count> [error-message]
# Asserts that $TEST_log contains <count> lines matching <regexp>.
# Prints the contents of $TEST_log and the specified (optional)
# error message otherwise, and returns non-zero.
function expect_log_n() {
  local pattern=$1
  local expectednum=${2:-1}
  local message=${3:-Expected regexp "$pattern" not found exactly $expectednum times}
  local count=$(grep -sc -- "$pattern" $TEST_log)
  [[ $count = $expectednum ]] && return 0
  fail "$message"
  return 1
}
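
# Illustrative use (hypothetical pattern): assert that exactly two lines in
# $TEST_log start with "WARNING:":
#
#   expect_log_n "^WARNING:" 2 "expected exactly two warnings in the log"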

# Usage: expect_not_log <regexp> [error-message]
# Asserts that $TEST_log does not match regexp. Prints the contents
# of $TEST_log and the specified (optional) error message otherwise, and
# returns non-zero.
function expect_not_log() {
  local pattern=$1
  local message=${2:-Unexpected regexp "$pattern" found}
  grep -sq -- "$pattern" $TEST_log || return 0

  fail "$message"
  return 1
}

# Usage: expect_log_with_timeout <regexp> <timeout> [error-message]
# Waits for the given regexp in the $TEST_log for up to timeout seconds.
# Prints the contents of $TEST_log and the specified (optional)
# error message otherwise, and returns non-zero.
function expect_log_with_timeout() {
  local pattern=$1
  local timeout=$2
  local message=${3:-Regexp "$pattern" not found in "$timeout" seconds}
  local count=0
  while [ $count -lt $timeout ]; do
    grep -sq -- "$pattern" $TEST_log && return 0
    let count=count+1
    sleep 1
  done

  grep -sq -- "$pattern" $TEST_log && return 0
  fail "$message"
  return 1
}

# Usage: expect_cmd_with_timeout <expected> <cmd> [timeout]
# Repeats the command once a second for up to timeout seconds (10s by default),
# until the output matches the expected value. Fails and returns 1 if
# the command does not return the expected value in the end.
function expect_cmd_with_timeout() {
  local expected="$1"
  local cmd="$2"
  local timeout=${3:-10}
  local count=0
  while [ $count -lt $timeout ]; do
    local actual="$($cmd)"
    [ "$expected" = "$actual" ] && return 0
    let count=count+1
    sleep 1
  done

  [ "$expected" = "$actual" ] && return 0
  fail "Expected '$expected' within ${timeout}s, was '$actual'"
  return 1
}
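
# Illustrative use (hypothetical status file): poll for up to 30 seconds until
# a status file reports "ready". Note that <cmd> is expanded with plain word
# splitting, so simple commands work best:
#
#   expect_cmd_with_timeout "ready" "cat $TEST_TMPDIR/server_status" 30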

# Usage: assert_one_of <expected_list>... <actual>
# Asserts that actual is one of the items in expected_list
# Example: assert_one_of "foo" "bar" "baz" actualval
function assert_one_of() {
  local args=("$@")
  local last_arg_index=$((${#args[@]} - 1))
  local actual=${args[last_arg_index]}
  unset args[last_arg_index]
  for expected_item in "${args[@]}"; do
    [ "$expected_item" = "$actual" ] && return 0
  done;

  fail "Expected one of '${args[@]}', was '$actual'"
  return 1
}

# Usage: assert_equals <expected> <actual>
# Asserts [ expected = actual ].
function assert_equals() {
  local expected=$1 actual=$2
  [ "$expected" = "$actual" ] && return 0

  fail "Expected '$expected', was '$actual'"
  return 1
}

# Usage: assert_not_equals <unexpected> <actual>
# Asserts [ unexpected != actual ].
function assert_not_equals() {
  local unexpected=$1 actual=$2
  [ "$unexpected" != "$actual" ] && return 0;

  fail "Expected not '$unexpected', was '$actual'"
  return 1
}

# Usage: assert_contains <regexp> <file> [error-message]
# Asserts that file matches regexp. Prints the contents of
# file and the specified (optional) error message otherwise, and
# returns non-zero.
function assert_contains() {
  local pattern=$1
  local file=$2
  local message=${3:-Expected regexp "$pattern" not found in "$file"}
  grep -sq -- "$pattern" "$file" && return 0

  cat "$file" >&2
  fail "$message"
  return 1
}

# Usage: assert_not_contains <regexp> <file> [error-message]
# Asserts that file does not match regexp. Prints the contents of
# file and the specified (optional) error message otherwise, and
# returns non-zero.
function assert_not_contains() {
  local pattern=$1
  local file=$2
  local message=${3:-Expected regexp "$pattern" found in "$file"}
  grep -sq -- "$pattern" "$file" || return 0

  cat "$file" >&2
  fail "$message"
  return 1
}

# Updates the global variable TESTS if
# sharding is enabled, i.e. ($TEST_TOTAL_SHARDS > 0).
function __update_shards() {
  [ -z "${TEST_TOTAL_SHARDS-}" ] && return 0

  [ "$TEST_TOTAL_SHARDS" -gt 0 ] ||
    { echo "Invalid total shards $TEST_TOTAL_SHARDS" >&2; exit 1; }

  [ "$TEST_SHARD_INDEX" -lt 0 -o "$TEST_SHARD_INDEX" -ge "$TEST_TOTAL_SHARDS" ] &&
    { echo "Invalid shard $TEST_SHARD_INDEX" >&2; exit 1; }

  TESTS=$(for test in "${TESTS[@]}"; do echo "$test"; done |
    awk "NR % $TEST_TOTAL_SHARDS == $TEST_SHARD_INDEX")

  [ -z "${TEST_SHARD_STATUS_FILE-}" ] || touch "$TEST_SHARD_STATUS_FILE"
}
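
# As a concrete illustration (hypothetical four-test suite): with
# TEST_TOTAL_SHARDS=2 the awk filter above keeps the tests whose 1-based
# position satisfies NR % 2 == TEST_SHARD_INDEX, so shard 0 runs the 2nd and
# 4th tests and shard 1 runs the 1st and 3rd.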

# Usage: __test_terminated <signal-number>
# Handler that is called when the test terminated unexpectedly
function __test_terminated() {
  __show_log >&2
  echo "$TEST_name FAILED: terminated by signal $1." >&2
  TEST_passed="false"
  __show_stack
  timeout
  exit 1
}

# Usage: __test_terminated_err
# Handler that is called when the test terminated unexpectedly due to "errexit".
function __test_terminated_err() {
  # When a subshell exits due to signal ERR, its parent shell also exits,
  # thus the signal handler is called recursively and we print out the
  # error message and stack trace multiple times. We're only interested
  # in the first one though, as it contains the most information, so ignore
  # all following.
  if [[ -f $TEST_TMPDIR/__err_handled ]]; then
    exit 1
  fi
  __show_log >&2
  if [[ ! -z "$TEST_name" ]]; then
    echo -n "$TEST_name " >&2
  fi
  echo "FAILED: terminated because this command returned a non-zero status:" >&2
  touch $TEST_TMPDIR/__err_handled
  TEST_passed="false"
  __show_stack
  # If $TEST_name is still empty, the test suite failed before we even started
  # to run tests, so we shouldn't call tear_down.
  if [[ ! -z "$TEST_name" ]]; then
    tear_down
  fi
  exit 1
}

# Usage: __trap_with_arg <handler> <signals ...>
# Helper to install a trap handler for several signals preserving the signal
# number, so that the signal number is available to the trap handler.
function __trap_with_arg() {
  func="$1" ; shift
  for sig ; do
    trap "$func $sig" "$sig"
  done
}

# Usage: __log_to_test_report <node> <block>
# Adds the block to the given node in the report file. Quotes in the
# arguments need to be escaped.
function __log_to_test_report() {
  local node="$1"
  local block="$2"
  if [[ ! -e "$XML_OUTPUT_FILE" ]]; then
    local xml_header='<?xml version="1.0" encoding="UTF-8"?>'
    echo "$xml_header<testsuites></testsuites>" > $XML_OUTPUT_FILE
  fi

  # replace match on node with block and match
  # replacement expression only needs escaping for quotes
  perl -e "\
    \$input = @ARGV[0]; \
    \$/=undef; \
    open FILE, '+<$XML_OUTPUT_FILE'; \
    \$content = <FILE>; \
    if (\$content =~ /($node.*)\$/) { \
      seek FILE, 0, 0; \
      print FILE \$\` . \$input . \$1; \
    }; \
    close FILE" "$block"
}

# Usage: __finish_test_report <total> <passed>
# Adds the test summaries to the xml nodes.
function __finish_test_report() {
  local total=$1
  local passed=$2
  local failed=$((total - passed))

  cat $XML_OUTPUT_FILE | \
    sed \
      "s/<testsuites>/<testsuites tests=\"$total\" failures=\"0\" errors=\"$failed\">/" | \
    sed \
      "s/<testsuite>/<testsuite tests=\"$total\" failures=\"0\" errors=\"$failed\">/" \
    > $XML_OUTPUT_FILE.bak

  rm -f $XML_OUTPUT_FILE
  mv $XML_OUTPUT_FILE.bak $XML_OUTPUT_FILE
}

# Multi-platform timestamp function
UNAME=$(uname -s | tr 'A-Z' 'a-z')
if [ "$UNAME" = "linux" ] || [[ "$UNAME" =~ msys_nt* ]]; then
  function timestamp() {
    echo $(($(date +%s%N)/1000000))
  }
else
  function timestamp() {
    # OS X and FreeBSD do not have %N so python is the best we can do
    python -c 'import time; print(int(round(time.time() * 1000)))'
  }
fi

function get_run_time() {
  local ts_start=$1
  local ts_end=$2
  run_time_ms=$((${ts_end}-${ts_start}))
  echo $(($run_time_ms/1000)).${run_time_ms: -3}
}
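
# For illustration (hypothetical millisecond timestamps):
#   get_run_time 1000000000123 1000000002623   # prints "2.500"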

# Usage: run_suite <suite-comment>
# Must be called at the end of the user's test suite.
# Calls exit with zero on success, non-zero otherwise.
function run_suite() {
  echo >&2
  echo "$1" >&2
  echo >&2

  __log_to_test_report "<\/testsuites>" "<testsuite></testsuite>"

  local total=0
  local passed=0

  atexit "cleanup"

  # If the user didn't specify an explicit list of tests (e.g. a
  # working set), use them all.
  if [ ${#TESTS[@]} = 0 ]; then
    TESTS=$(declare -F | awk '{print $3}' | grep ^test_)
  elif [ -n "${TEST_WARNINGS_OUTPUT_FILE:-}" ]; then
    if grep -q "TESTS=" "$TEST_script" ; then
      echo "TESTS variable overridden in Bazel sh_test. Please remove before submitting" \
        >> "$TEST_WARNINGS_OUTPUT_FILE"
    fi
  fi

  __update_shards

  for TEST_name in ${TESTS[@]}; do
    >$TEST_log # Reset the log.
    TEST_passed="true"

    total=$(($total + 1))
    if [[ "$TEST_verbose" == "true" ]]; then
      __pad $TEST_name '*' >&2
    fi

    local run_time="0.0"
    rm -f $TEST_TMPDIR/{__ts_start,__ts_end}

    if [ "$(type -t $TEST_name)" = function ]; then
      # Save exit handlers eventually set.
      local SAVED_ATEXIT="$ATEXIT";
      ATEXIT=

      # Run test in a subshell.
      rm -f $TEST_TMPDIR/__err_handled
      __trap_with_arg __test_terminated INT KILL PIPE TERM ABRT FPE ILL QUIT SEGV
      (
        timestamp >$TEST_TMPDIR/__ts_start
        set_up
        eval $TEST_name
        tear_down
        timestamp >$TEST_TMPDIR/__ts_end
        test $TEST_passed == "true"
      ) 2>&1 | tee $TEST_TMPDIR/__log
      # Note that tee will prevent the control flow from continuing if the
      # test spawned any processes which are still running and have not
      # closed their stdout.

      test_subshell_status=${PIPESTATUS[0]}
      if [ "$test_subshell_status" != 0 ]; then
        TEST_passed="false"
        # Ensure that an end time is recorded in case the test subshell
        # terminated prematurely.
        [ -f $TEST_TMPDIR/__ts_end ] || timestamp >$TEST_TMPDIR/__ts_end
      fi

      # Calculate run time for the testcase.
      local ts_start=$(cat $TEST_TMPDIR/__ts_start)
      local ts_end=$(cat $TEST_TMPDIR/__ts_end)
      run_time=$(get_run_time $ts_start $ts_end)

      # Eventually restore exit handlers.
      if [ -n "$SAVED_ATEXIT" ]; then
        ATEXIT="$SAVED_ATEXIT"
        trap "$ATEXIT" EXIT
      fi
    else # Bad test explicitly specified in $TESTS.
      fail "Not a function: '$TEST_name'"
    fi

    local testcase_tag=""

    if [[ "$TEST_passed" == "true" ]]; then
      if [[ "$TEST_verbose" == "true" ]]; then
        echo "PASSED: $TEST_name" >&2
      fi
      passed=$(($passed + 1))
      testcase_tag="<testcase name=\"$TEST_name\" status=\"run\" time=\"$run_time\" classname=\"\"></testcase>"
    else
      echo "FAILED: $TEST_name" >&2
      # end marker in CDATA cannot be escaped, we need to split the CDATA sections
      log=$(cat $TEST_TMPDIR/__log | sed 's/]]>/]]>]]><![CDATA[/g')
      fail_msg=$(cat $TEST_TMPDIR/__fail 2> /dev/null || echo "No failure message")
      testcase_tag="<testcase name=\"$TEST_name\" status=\"run\" time=\"$run_time\" classname=\"\"><error message=\"$fail_msg\"><![CDATA[$log]]></error></testcase>"
    fi

    if [[ "$TEST_verbose" == "true" ]]; then
      echo >&2
    fi
    __log_to_test_report "<\/testsuite>" "$testcase_tag"
  done

  __finish_test_report $total $passed
  __pad "$passed / $total tests passed." '*' >&2
  [ $total = $passed ] || {
    __pad "There were errors." '*'
    exit 1
  } >&2

  exit 0
}