xref: /netbsd-src/usr.bin/make/unit-tests/Makefile (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
# $NetBSD: Makefile,v 1.53 2018/05/24 00:25:44 christos Exp $
#
# Unit tests for make(1)
# The main targets are:
#
# all:	run all the tests
# test:	run 'all', and compare to expected results
# accept: move generated output to expected results
#
# Adding a test case.
# Each feature should get its own set of tests in its own suitably
# named makefile (*.mk), with its own set of expected results (*.exp),
# and it should be added to the TESTNAMES list.
#
# Build "all" when make is invoked without an explicit target.
.MAIN: all

# Directory holding the test makefiles (*.mk) and expected results (*.exp).
UNIT_TESTS:= ${.PARSEDIR}
.PATH: ${UNIT_TESTS}

# Each test is in a sub-makefile.
# Keep the list sorted.
TESTNAMES= \
	comment \
	cond1 \
	cond2 \
	doterror \
	dotwait \
	error \
	export \
	export-all \
	export-env \
	forloop \
	forsubst \
	hash \
	misc \
	moderrs \
	modmatch \
	modmisc \
	modorder \
	modts \
	modword \
	order \
	posix \
	qequals \
	sunshcmd \
	sysv \
	ternary \
	unexport \
	unexport-env \
	varcmd \
	varmisc \
	varquote \
	varshell

# These tests were broken by reverting POSIX changes.
# They are kept out of TESTNAMES until they work again.
STRICT_POSIX_TESTS = \
	escape \
	impsrc \
	phony-end \
	posix1 \
	suffixes

# Override make flags for certain tests; the default (see the .mk.rawout
# rule below, via the :U modifier) is -k.
flags.doterror=
flags.order=-j1

# One .out file per test; these are the products of the "all" target.
OUTFILES= ${TESTNAMES:S/$/.out/}

all: ${OUTFILES}

CLEANFILES += *.rawout *.out *.status *.tmp *.core
CLEANFILES += obj*.[och] lib*.a		# posix1.mk
CLEANFILES += issue* .[ab]*		# suffixes.mk
CLEANRECURSIVE += dir dummy		# posix1.mk

clean:
	rm -f ${CLEANFILES}
.if !empty(CLEANRECURSIVE)
	rm -rf ${CLEANRECURSIVE}
.endif

# The make binary under test; defaults to the make running this Makefile.
TEST_MAKE?= ${.MAKE}
TOOL_SED?= sed

# Ensure consistent results from sort(1) and other locale-sensitive tools
# in the sub-makes.
LC_ALL= C
LANG= C
.export LANG LC_ALL

# The tests are actually done with sub-makes: each test's .mk file is run
# with TEST_MAKE, capturing combined stdout/stderr in the .rawout file and
# the exit status in a .status file.  The output is written to a .tmp file
# first and renamed, so an interrupted run never leaves a truncated .rawout
# that looks up to date.  The leading '-' ignores the sub-make's exit code:
# several tests deliberately fail.
.SUFFIXES: .mk .rawout .out
.mk.rawout:
	@echo ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC}
	-@cd ${.OBJDIR} && \
	{ ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC} \
	  2>&1 ; echo $$? >${.TARGET:R}.status ; } > ${.TARGET}.tmp
	@mv ${.TARGET}.tmp ${.TARGET}

# We always pretend .MAKE was called 'make'
# and strip ${.CURDIR}/ from the output
# and replace anything after 'stopped in' with unit-tests
# so the results can be compared.
# The captured exit status is appended so it is part of the comparison too.
.rawout.out:
	@echo postprocess ${.TARGET}
	@${TOOL_SED} -e 's,^${TEST_MAKE:T:C/\./\\\./g}[][0-9]*:,make:,' \
	  -e 's,${TEST_MAKE:C/\./\\\./g},make,' \
	  -e '/stopped/s, /.*, unit-tests,' \
	  -e 's,${.CURDIR:C/\./\\\./g}/,,g' \
	  -e 's,${UNIT_TESTS:C/\./\\\./g}/,,g' \
	  < ${.IMPSRC} > ${.TARGET}.tmp
	@echo "exit status `cat ${.TARGET:R}.status`" >> ${.TARGET}.tmp
	@mv ${.TARGET}.tmp ${.TARGET}

# Compare all output files against the expected results; list every
# failing test before exiting non-zero.
test:	${OUTFILES} .PHONY
	@failed= ; \
	for test in ${TESTNAMES}; do \
	  diff -u ${UNIT_TESTS}/$${test}.exp $${test}.out \
	  || failed="$${failed}$${failed:+ }$${test}" ; \
	done ; \
	if [ -n "$${failed}" ]; then \
	  echo "Failed tests: $${failed}" ; false ; \
	else \
	  echo "All tests passed" ; \
	fi

# Install the current output of each test as its new expected result.
# cmp -s keeps unchanged .exp files untouched.
# NOTE: a ';' is required after the brace group before 'done'; the original
# '} \' continued straight into 'done', which is a shell syntax error.
accept:
	@for test in ${TESTNAMES}; do \
	  cmp -s ${UNIT_TESTS}/$${test}.exp $${test}.out \
	  || { echo "Replacing $${test}.exp" ; \
	       cp $${test}.out ${UNIT_TESTS}/$${test}.exp ; } ; \
	done

# If the make binary under test is rebuilt, the captured outputs are stale:
# make every .rawout depend on it.
.if exists(${TEST_MAKE})
${TESTNAMES:S/$/.rawout/}: ${TEST_MAKE}
.endif

# Optional; '.-include' ignores a missing bsd.obj.mk.
.-include <bsd.obj.mk>
140