12 | 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | 13 | # See the License for the specific language governing permissions and
14 | 14 | # limitations under the License.
15 | | -set -e
16 | | -# THIS FILE ASSUMES IT IS RUN INSIDE THE tests/tests_<package> DIRECTORY
| 15 | +
| 16 | +# THIS FILE ASSUMES IT IS RUN INSIDE THE tests DIRECTORY.
17 | 17 |
18 | 18 | # Batch size for testing: Determines how many standalone test invocations run in parallel
19 | | -# It can be set through the env variable PL_STANDALONE_TESTS_BATCH_SIZE and defaults to 6 if not set
20 | | -test_batch_size="${PL_STANDALONE_TESTS_BATCH_SIZE:-3}"
21 | | -source="${PL_STANDALONE_TESTS_SOURCE:-"lightning"}"
22 | | -# this is the directory where the tests are located
| 19 | +# It can be set through the env variable NUM_PARALLEL_TESTS and defaults to 5 if not set
| 20 | +test_batch_size="${NUM_PARALLEL_TESTS:-5}"
| 21 | +
| 22 | +# Source directory for coverage runs can be set with CODECOV_SOURCE and defaults to lightning.
| 23 | +codecov_source="${CODECOV_SOURCE:-"lightning"}"
| 24 | +
| 25 | +# The test directory is passed as the first argument to the script
23 | 26 | test_dir=$1 # parse the first argument
| 27 | +
| 28 | +# There is also a timeout for the tests.
| 29 | +# It can be set through the env variable TEST_TIMEOUT and defaults to 1200 seconds if not set.
| 30 | +test_timeout="${TEST_TIMEOUT:-1200}"
| 31 | +
| 32 | +# Temporary file to store the collected tests
24 | 33 | COLLECTED_TESTS_FILE="collected_tests.txt"
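Taken together, the knobs above mean the batch size, coverage source, and per-test timeout can all be overridden from the environment when the script is invoked. A hypothetical invocation (the script path and test directory are placeholders, not taken from this diff):

    # example: 8 parallel tests, 10-minute timeout (paths are placeholders)
    NUM_PARALLEL_TESTS=8 TEST_TIMEOUT=600 CODECOV_SOURCE=lightning \
      bash run_standalone_tests.sh tests_pytorch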
25 | 34 |
26 | 35 | ls -lh . # show the contents of the directory
27 | 36 |
28 | | -# this environment variable allows special tests to run
29 | | -export PL_RUN_STANDALONE_TESTS=1
30 | | -# python arguments
31 | | -defaults=" -m coverage run --source ${source} --append -m pytest --no-header -v -s --timeout 120 "
| 37 | +# Python arguments for running the tests and coverage
| 38 | +defaults=" -m coverage run --source ${codecov_source} --append -m pytest --no-header -v -s --color=yes --timeout=${test_timeout} --durations=0 "
32 | 39 | echo "Using defaults: ${defaults}"
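For reference, when a test is later launched as python ${defaults} "$test", the command expands to roughly the following (assuming the defaults above; the test path is a placeholder):

    python -m coverage run --source lightning --append \
      -m pytest --no-header -v -s --color=yes --timeout=1200 --durations=0 \
      "tests_pytorch/some_test.py::test_example"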
33 | 40 |
34 | | -# get the list of parametrizations. we need to call them separately. the last two lines are removed.
| 41 | +# Get the list of parametrizations. We need to call them separately.
35 | 42 | # note: if there's a syntax error, this will fail with some garbled output
36 | | -python3 -um pytest $test_dir -q --collect-only --pythonwarnings ignore 2>&1 > $COLLECTED_TESTS_FILE
37 | | -# early terminate if collection failed (e.g. syntax error)
| 43 | +python -um pytest ${test_dir} -q --collect-only --pythonwarnings ignore 2>&1 > $COLLECTED_TESTS_FILE
| 44 | +# Early terminate if collection failed (e.g. syntax error)
38 | 45 | if [[ $? != 0 ]]; then
39 | 46 |   cat $COLLECTED_TESTS_FILE
| 47 | +  printf "ERROR: test collection failed!\n"
40 | 48 |   exit 1
41 | 49 | fi
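For context, pytest -q --collect-only prints one test node id per line followed by a summary line, so the collected file looks roughly like this (test names invented for illustration):

    tests_pytorch/test_a.py::test_one[param0]
    tests_pytorch/test_a.py::test_one[param1]
    tests_pytorch/test_b.py::test_two
    3 tests collected in 0.12s

The filter below keeps only lines containing "test_", which drops the summary line and makes the old sed '$d' cleanup unnecessary.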
42 | 50 |
43 | | -# removes the last line of the file
44 | | -sed -i '$d' $COLLECTED_TESTS_FILE
| 51 | +# Initialize empty array
| 52 | +tests=()
45 | 53 |
46 | | -# Get test list and run each test individually
47 | | -tests=($(grep -oP '\S+::test_\S+' "$COLLECTED_TESTS_FILE"))
| 54 | +# Read from file line by line
| 55 | +while IFS= read -r line; do
| 56 | +  # Only keep lines containing "test_"
| 57 | +  if [[ $line == *"test_"* ]]; then
| 58 | +    # Extract part after test_dir/
| 59 | +    pruned_line="${line#*${test_dir}/}"
| 60 | +    tests+=("${test_dir}/$pruned_line")
| 61 | +  fi
| 62 | +done < $COLLECTED_TESTS_FILE
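The ${line#*${test_dir}/} expansion strips the shortest leading match of *${test_dir}/, normalizing any path prefix that appears before the test directory. A standalone sketch of the same expansion:

    test_dir="tests_pytorch"
    line="some/prefix/tests_pytorch/test_a.py::test_one[param0]"
    pruned_line="${line#*${test_dir}/}"   # -> test_a.py::test_one[param0]
    echo "${test_dir}/$pruned_line"       # -> tests_pytorch/test_a.py::test_one[param0]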
| 63 | +
| 64 | +# Count tests
48 | 65 | test_count=${#tests[@]}
49 | | -# present the collected tests
| 66 | +
| 67 | +# Display results
50 | 68 | printf "collected $test_count tests:\n-------------------\n"
51 | | -# replace space with new line
52 | | -echo "${tests[@]}" | tr ' ' '\n'
| 69 | +printf "%s\n" "${tests[@]}"
53 | 70 | printf "\n===================\n"
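printf "%s\n" "${tests[@]}" applies the format string once per array element, so each collected test lands on its own line; unlike the old echo-through-tr pipeline, it cannot mangle an element that itself contains a space. A quick illustration:

    tests=("dir/test_a.py::test_one" "dir/test_b.py::test_two")
    printf "%s\n" "${tests[@]}"
    # dir/test_a.py::test_one
    # dir/test_b.py::test_two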
54 | 71 |
55 | 72 | # if test count is one print warning
63 | 80 | # clear all the collected reports
64 | 81 | rm -f parallel_test_output-*.txt # in case it exists, remove it
65 | 82 |
66 | | -
67 | | -status=0 # reset the script status
| 83 | +status=0 # aggregated script status
68 | 84 | report="" # final report
69 | 85 | pids=() # array of PID for running tests
70 | 86 | test_ids=() # array of indexes of running tests
71 | | -printf "Running $test_count tests in batches of $test_batch_size\n"
| 87 | +failed_tests=() # array of failed tests
| 88 | +printf "Running $test_count tests in batches of $test_batch_size:\n"
72 | 89 | for i in "${!tests[@]}"; do
73 | | -  # remove initial "tests/" from the test name
74 | | -  test=${tests[$i]/tests\//}
75 | | -  printf "Running test $((i+1))/$test_count: $test\n"
| 90 | +  test=${tests[$i]}
| 91 | +  printf "* Running test $((i+1))/$test_count: $test\n"
76 | 92 |
77 | 93 |   # execute the test in the background
78 | 94 |   # redirect to a log file that buffers test output. since the tests will run in the background,
79 | 95 |   # we cannot let them output to std{out,err} because the outputs would be garbled together
80 | | -  python3 ${defaults} "$test" 2>&1 > "standalone_test_output-$i.txt" &
| 96 | +  python ${defaults} "$test" &> "parallel_test_output-$i.txt" &
81 | 97 |   test_ids+=($i) # save the test's id in an array with running tests
82 | 98 |   pids+=($!) # save the PID in an array with running tests
83 | 99 |
84 | 100 |   # if we reached the batch size, wait for all tests to finish
85 | 101 |   if (( (($i + 1) % $test_batch_size == 0) || $i == $test_count-1 )); then
86 | | -    printf "Waiting for batch to finish: $(IFS=' '; echo "${pids[@]}")\n"
| 102 | +    printf "-> Waiting for batch to finish: $(IFS=' '; echo "${pids[@]}")\n"
87 | 103 |     # wait for running tests
88 | 104 |     for j in "${!test_ids[@]}"; do
89 | 105 |       i=${test_ids[$j]} # restore the global test's id
90 | 106 |       pid=${pids[$j]} # restore the particular PID
91 | 107 |       test=${tests[$i]} # restore the test name
92 | | -      printf "Waiting for $tests >> standalone_test_output-$i.txt (PID: $pid)\n"
| 108 | +      printf "? Waiting for $test >> parallel_test_output-$i.txt (PID: $pid)\n"
93 | 109 | wait -n $pid
|
94 | 110 | # get the exit status of the test
|
95 | 111 | test_status=$?
|
96 | 112 | # add row to the final report
|
97 | 113 | report+="Ran\t$test\t>> exit:$test_status\n"
|
98 | 114 | if [[ $test_status != 0 ]]; then
|
99 |
| - # show the output of the failed test |
100 |
| - cat "standalone_test_output-$i.txt" |
| 115 | + # add the test to the failed tests array |
| 116 | + failed_tests+=($i) |
101 | 117 | # Process exited with a non-zero exit status
|
102 | 118 | status=$test_status
|
103 | 119 | fi
|
104 | 120 | done
|
| 121 | +    printf "Starting over with a new batch...\n"
105 | 122 |     test_ids=() # reset the test's id array
106 | 123 |     pids=() # reset the PID array
107 | 124 |   fi
108 | 125 | done
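The loop above is a fixed-size batching scheme: launch up to test_batch_size background jobs, record their PIDs, wait on each one, and fold every exit code into a single aggregate status. A minimal self-contained sketch of the same pattern (the commands are placeholders):

    pids=()
    status=0
    for cmd in "sleep 1" "sleep 2" "false"; do  # placeholder workloads
      bash -c "$cmd" &                          # run in the background
      pids+=($!)                                # remember the PID
    done
    for pid in "${pids[@]}"; do
      wait "$pid" || status=$?                  # keep the last non-zero exit code
    done
    echo "aggregate status: $status"            # prints 1 because of "false"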
109 | 126 |
110 | | -# echo test report
| 127 | +# print test report with exit code for each test
111 | 128 | printf '=%.s' {1..80}
112 | 129 | printf "\n$report"
113 | 130 | printf '=%.s' {1..80}
114 | 131 | printf '\n'
115 | 132 |
116 | | -# exit with the worst test result
| 133 | +# print failed tests from dumped logs
| 134 | +if [[ ${#failed_tests[@]} -gt 0 ]]; then
| 135 | +  printf "Failed tests:\n"
| 136 | +  for i in "${failed_tests[@]}"; do
| 137 | +    printf '\n%.s' {1..5}
| 138 | +    printf '=%.s' {1..80}
| 139 | +    printf "\n${tests[$i]}\n"
| 140 | +    printf '-%.s' {1..80}
| 141 | +    printf "\n"
| 142 | +    # show the output of the failed test
| 143 | +    cat "parallel_test_output-$i.txt"
| 144 | +    printf "\n"
| 145 | +    printf '=%.s' {1..80}
| 146 | +  done
| 147 | +else
| 148 | +  printf "All tests passed!\n"
| 149 | +fi
| 150 | +
| 151 | +# exit with the worst test result
117 | 152 | exit $status