Makefile revision 1.59
# $NetBSD: Makefile,v 1.59 2020/06/28 09:42:40 rillig Exp $
#
# Unit tests for make(1)
# The main targets are:
#
#	all:	run all the tests
#	test:	run 'all', and compare to expected results
#	accept:	move generated output to expected results
#
# Adding a test case:
# Each feature should get its own set of tests in its own suitably
# named makefile (*.mk), with its own set of expected results (*.exp),
# and it should be added to the TESTNAMES list.
#

.MAIN: all

UNIT_TESTS:= ${.PARSEDIR}
.PATH: ${UNIT_TESTS}

# Each test is in a sub-makefile.
# Keep the list sorted.
TESTNAMES= \
	comment \
	cond-late \
	cond-short \
	cond1 \
	cond2 \
	dollar \
	doterror \
	dotwait \
	error \
	export \
	export-all \
	export-env \
	forloop \
	forsubst \
	hash \
	include-main \
	misc \
	moderrs \
	modmatch \
	modmisc \
	modorder \
	modts \
	modword \
	order \
	posix \
	qequals \
	sunshcmd \
	sysv \
	ternary \
	unexport \
	unexport-env \
	varcmd \
	varmisc \
	varmod-edge \
	varquote \
	varshell

# These tests were broken by reverting POSIX changes.
STRICT_POSIX_TESTS = \
	escape \
	impsrc \
	phony-end \
	posix1 \
	suffixes

# Override make flags for certain tests.
flags.doterror=
flags.order=-j1

OUTFILES= ${TESTNAMES:S/$/.out/}

all: ${OUTFILES}

CLEANFILES += *.rawout *.out *.status *.tmp *.core
CLEANFILES += obj*.[och] lib*.a		# posix1.mk
CLEANFILES += issue* .[ab]*		# suffixes.mk
CLEANRECURSIVE += dir dummy		# posix1.mk

clean:
	rm -f ${CLEANFILES}
.if !empty(CLEANRECURSIVE)
	rm -rf ${CLEANRECURSIVE}
.endif

TEST_MAKE?= ${.MAKE}
TOOL_SED?= sed

# Ensure consistent results from sort(1).
LC_ALL= C
LANG= C
.export LANG LC_ALL

# The tests are run with sub-makes.
.SUFFIXES: .mk .rawout .out
.mk.rawout:
	@echo ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC}
	-@cd ${.OBJDIR} && \
	{ ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC} \
	  2>&1 ; echo $$? >${.TARGET:R}.status ; } > ${.TARGET}.tmp
	@mv ${.TARGET}.tmp ${.TARGET}

# In the output, always pretend that ${.MAKE} was called 'make',
# strip the ${.CURDIR}/ and ${UNIT_TESTS}/ prefixes,
# and replace anything after 'stopped in' with 'unit-tests',
# so that the results can be compared.
.rawout.out:
	@echo postprocess ${.TARGET}
	@${TOOL_SED} -e 's,^${TEST_MAKE:T:C/\./\\\./g}[][0-9]*:,make:,' \
		-e 's,${TEST_MAKE:C/\./\\\./g},make,' \
		-e '/stopped/s, /.*, unit-tests,' \
		-e 's,${.CURDIR:C/\./\\\./g}/,,g' \
		-e 's,${UNIT_TESTS:C/\./\\\./g}/,,g' \
		< ${.IMPSRC} > ${.TARGET}.tmp
	@echo "exit status `cat ${.TARGET:R}.status`" >> ${.TARGET}.tmp
	@mv ${.TARGET}.tmp ${.TARGET}

# Compare all output files to the expected results.
test: ${OUTFILES} .PHONY
	@failed= ; \
	for test in ${TESTNAMES}; do \
	    diff -u ${UNIT_TESTS}/$${test}.exp $${test}.out \
	    || failed="$${failed}$${failed:+ }$${test}" ; \
	done ; \
	if [ -n "$${failed}" ]; then \
	    echo "Failed tests: $${failed}" ; false ; \
	else \
	    echo "All tests passed" ; \
	fi

accept:
	@for test in ${TESTNAMES}; do \
	    cmp -s ${UNIT_TESTS}/$${test}.exp $${test}.out \
	    || { echo "Replacing $${test}.exp" ; \
	         cp $${test}.out ${UNIT_TESTS}/$${test}.exp ; } \
	done

.if exists(${TEST_MAKE})
${TESTNAMES:S/$/.rawout/}: ${TEST_MAKE}
.endif

.-include <bsd.obj.mk>
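
For orientation, the main targets are normally driven straight from the unit-tests directory. The path below assumes the usual NetBSD src layout and is illustrative only; the target names come from the Makefile above:

	cd /usr/src/usr.bin/make/unit-tests
	make		# build a *.out file for every test in TESTNAMES
	make test	# rebuild the *.out files and diff each one against its *.exp
	make accept	# after reviewing the diffs, copy the *.out files over the *.exp files
	make clean	# remove the generated *.rawout, *.out and *.status files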
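
As a sketch of the "Adding a test case" procedure from the header comment, suppose a new feature is to be covered by a hypothetical test named newfeature (the name and the one-line test body are illustrative, not part of this revision). Create newfeature.mk next to this Makefile, for example:

	# newfeature.mk -- exercise the new feature
	all:
		@echo 'expected output of the new feature'

Then add "newfeature" to the TESTNAMES list (keeping it sorted) and record the expected output:

	make test	# newfeature is reported as failed: there is no newfeature.exp yet
	make accept	# after checking newfeature.out by hand, install it as newfeature.exp
	make test	# should now report "All tests passed"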