/*	$Vendor-Id: man_macro.c,v 1.49 2010/07/22 23:03:15 kristaps Exp $ */
/*
 * Copyright (c) 2008, 2009 Kristaps Dzonsons <kristaps@bsd.lv>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>

#include "mandoc.h"
#include "libman.h"

enum	rew {
	REW_REWIND,
	REW_NOHALT,
	REW_HALT
};

static	int		 blk_close(MACRO_PROT_ARGS);
static	int		 blk_exp(MACRO_PROT_ARGS);
static	int		 blk_imp(MACRO_PROT_ARGS);
static	int		 in_line_eoln(MACRO_PROT_ARGS);

static	int		 rew_scope(enum man_type,
				struct man *, enum mant);
static	enum rew	 rew_dohalt(enum mant, enum man_type,
				const struct man_node *);
static	enum rew	 rew_block(enum mant, enum man_type,
				const struct man_node *);
static	int		 rew_warn(struct man *,
				struct man_node *, enum mandocerr);

const	struct man_macro __man_macros[MAN_MAX] = {
	{ in_line_eoln, MAN_NSCOPED }, /* br */
	{ in_line_eoln, 0 }, /* TH */
	{ blk_imp, MAN_SCOPED }, /* SH */
	{ blk_imp, MAN_SCOPED }, /* SS */
	{ blk_imp, MAN_SCOPED | MAN_FSCOPED }, /* TP */
	{ blk_imp, 0 }, /* LP */
	{ blk_imp, 0 }, /* PP */
	{ blk_imp, 0 }, /* P */
	{ blk_imp, 0 }, /* IP */
	{ blk_imp, 0 }, /* HP */
	{ in_line_eoln, MAN_SCOPED }, /* SM */
	{ in_line_eoln, MAN_SCOPED }, /* SB */
	{ in_line_eoln, 0 }, /* BI */
	{ in_line_eoln, 0 }, /* IB */
	{ in_line_eoln, 0 }, /* BR */
	{ in_line_eoln, 0 }, /* RB */
	{ in_line_eoln, MAN_SCOPED }, /* R */
	{ in_line_eoln, MAN_SCOPED }, /* B */
	{ in_line_eoln, MAN_SCOPED }, /* I */
	{ in_line_eoln, 0 }, /* IR */
	{ in_line_eoln, 0 }, /* RI */
	{ in_line_eoln, MAN_NSCOPED }, /* na */
	{ in_line_eoln, 0 }, /* i */
	{ in_line_eoln, MAN_NSCOPED }, /* sp */
	{ in_line_eoln, 0 }, /* nf */
	{ in_line_eoln, 0 }, /* fi */
	{ in_line_eoln, 0 }, /* r */
	{ blk_close, 0 }, /* RE */
	{ blk_exp, MAN_EXPLICIT }, /* RS */
	{ in_line_eoln, 0 }, /* DT */
	{ in_line_eoln, 0 }, /* UC */
	{ in_line_eoln, 0 }, /* PD */
	{ in_line_eoln, MAN_NSCOPED }, /* Sp */
	{ in_line_eoln, 0 }, /* Vb */
	{ in_line_eoln, 0 }, /* Ve */
	{ in_line_eoln, 0 }, /* AT */
	{ in_line_eoln, 0 }, /* in */
};

const	struct man_macro * const man_macros = __man_macros;
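
/*
 * Informal reading of the table above: each entry pairs a macro's
 * handler with its flags.  For example, `SH' and `SS' are implicit
 * blocks whose head may extend to the next input line (blk_imp with
 * MAN_SCOPED), while `RS' is the only explicit block (blk_exp with
 * MAN_EXPLICIT) and is closed by `RE' (blk_close).
 */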


/*
 * Warn when the block of an explicit (non-roff) macro "n" is still
 * open while being rewound, provided an error code has been supplied.
 */
static int
rew_warn(struct man *m, struct man_node *n, enum mandocerr er)
{

	if (er == MANDOCERR_MAX || MAN_BLOCK != n->type)
		return(1);
	if (MAN_VALID & n->flags)
		return(1);
	if ( ! (MAN_EXPLICIT & man_macros[n->tok].flags))
		return(1);
	return(man_nmsg(m, n, er));
}


/*
 * Rewind scope up to and including the node "n".  If an error code
 * "er" other than MANDOCERR_MAX has been provided, it is reported
 * whenever an explicit block scope is closed out in the process.
 */
int
man_unscope(struct man *m, const struct man_node *n,
		enum mandocerr er)
{

	assert(n);

	/* LINTED */
	while (m->last != n) {
		if ( ! rew_warn(m, m->last, er))
			return(0);
		if ( ! man_valid_post(m))
			return(0);
		if ( ! man_action_post(m))
			return(0);
		m->last = m->last->parent;
		assert(m->last);
	}

	if ( ! rew_warn(m, m->last, er))
		return(0);
	if ( ! man_valid_post(m))
		return(0);
	if ( ! man_action_post(m))
		return(0);

	m->next = MAN_ROOT == m->last->type ?
		MAN_NEXT_CHILD : MAN_NEXT_SIBLING;

	return(1);
}


static enum rew
rew_block(enum mant ntok, enum man_type type, const struct man_node *n)
{

	if (MAN_BLOCK == type && ntok == n->parent->tok &&
			MAN_BODY == n->parent->type)
		return(REW_REWIND);
	return(ntok == n->tok ? REW_HALT : REW_NOHALT);
}


/*
 * There are three scope levels: scoped to the root (all), scoped to the
 * section (all less sections), and scoped to subsections (all less
 * sections and subsections).
 */
static enum rew
rew_dohalt(enum mant tok, enum man_type type, const struct man_node *n)
{
	enum rew	 c;

	/* We cannot progress beyond the root ever. */
	if (MAN_ROOT == n->type)
		return(REW_HALT);

	assert(n->parent);

	/* Normal nodes shouldn't go to the level of the root. */
	if (MAN_ROOT == n->parent->type)
		return(REW_REWIND);

	/* Already-validated nodes should be closed out. */
	if (MAN_VALID & n->flags)
		return(REW_NOHALT);

	/* First: rewind to ourselves. */
	if (type == n->type && tok == n->tok)
		return(REW_REWIND);

	/*
	 * Next follow the implicit scope-smashings as defined by man.7:
	 * section, sub-section, etc.
	 */

	switch (tok) {
	case (MAN_SH):
		break;
	case (MAN_SS):
		/* Rewind to a section, if a block. */
		if (REW_NOHALT != (c = rew_block(MAN_SH, type, n)))
			return(c);
		break;
	case (MAN_RS):
		/* Rewind to a subsection, if a block. */
		if (REW_NOHALT != (c = rew_block(MAN_SS, type, n)))
			return(c);
		/* Rewind to a section, if a block. */
		if (REW_NOHALT != (c = rew_block(MAN_SH, type, n)))
			return(c);
		break;
	default:
		/* Rewind to an offsetter, if a block. */
		if (REW_NOHALT != (c = rew_block(MAN_RS, type, n)))
			return(c);
		/* Rewind to a subsection, if a block. */
		if (REW_NOHALT != (c = rew_block(MAN_SS, type, n)))
			return(c);
		/* Rewind to a section, if a block. */
		if (REW_NOHALT != (c = rew_block(MAN_SH, type, n)))
			return(c);
		break;
	}

	return(REW_NOHALT);
}
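
/*
 * Put differently (informal summary of the switch above): `SH' closes
 * out everything down to the root; `SS' closes out everything down to,
 * but not including, an enclosing `SH'; `RS' stops at an enclosing
 * `SS' or `SH'; and the remaining blocks (`TP', `IP', `HP', ...) stop
 * at an enclosing `RS', `SS', or `SH'.
 */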


/*
 * Rewinding entails ascending the parse tree until a coherent point,
 * for example, the `SH' macro will close out any intervening `SS'
 * scopes.  When a scope is closed, it must be validated and actioned.
 */
static int
rew_scope(enum man_type type, struct man *m, enum mant tok)
{
	struct man_node	*n;
	enum rew	 c;

	/* LINTED */
	for (n = m->last; n; n = n->parent) {
		/*
		 * Whether we should stop immediately (REW_HALT), stop
		 * and rewind until this point (REW_REWIND), or keep
		 * rewinding (REW_NOHALT).
		 */
		c = rew_dohalt(tok, type, n);
		if (REW_HALT == c)
			return(1);
		if (REW_REWIND == c)
			break;
	}

	/*
	 * Rewind until the current point.  Warn if we're a roff
	 * instruction that's mowing over explicit scopes.
	 */
	assert(n);

	return(man_unscope(m, n, MANDOCERR_MAX));
}
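
/*
 * Example (hypothetical man(7) input, for illustration only):
 *
 *	.SH DESCRIPTION
 *	.SS Details
 *	.SH BUGS
 *
 * When the second `SH' is parsed, its handler calls rew_scope() for
 * the body and then the block: the walk ascends from the open `SS'
 * until rew_dohalt() reports REW_REWIND, and man_unscope() closes the
 * `SS' and the first `SH' before the new block is allocated.
 */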


/*
 * Close out a generic explicit macro.
 */
/* ARGSUSED */
int
blk_close(MACRO_PROT_ARGS)
{
	enum mant	 	 ntok;
	const struct man_node	*nn;

	switch (tok) {
	case (MAN_RE):
		ntok = MAN_RS;
		break;
	default:
		abort();
		/* NOTREACHED */
	}

	for (nn = m->last->parent; nn; nn = nn->parent)
		if (ntok == nn->tok)
			break;

	if (NULL == nn)
		if ( ! man_pmsg(m, line, ppos, MANDOCERR_NOSCOPE))
			return(0);

	if ( ! rew_scope(MAN_BODY, m, ntok))
		return(0);
	if ( ! rew_scope(MAN_BLOCK, m, ntok))
		return(0);

	return(1);
}
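
/*
 * Sketch of the intended use (hypothetical input):
 *
 *	.RS
 *	indented text
 *	.RE
 *
 * The `RE' maps to ntok == MAN_RS above; if no matching `RS' block is
 * found on the way up, MANDOCERR_NOSCOPE is reported, and the body and
 * block scopes are rewound regardless.
 */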


/* ARGSUSED */
int
blk_exp(MACRO_PROT_ARGS)
{
	int		 w, la;
	char		*p;

	/*
	 * Close out prior scopes.  "Regular" explicit macros cannot be
	 * nested, but we allow roff macros to be placed just about
	 * anywhere.
	 */

	if ( ! rew_scope(MAN_BODY, m, tok))
		return(0);
	if ( ! rew_scope(MAN_BLOCK, m, tok))
		return(0);

	if ( ! man_block_alloc(m, line, ppos, tok))
		return(0);
	if ( ! man_head_alloc(m, line, ppos, tok))
		return(0);

	for (;;) {
		la = *pos;
		w = man_args(m, line, pos, buf, &p);

		if (-1 == w)
			return(0);
		if (0 == w)
			break;

		if ( ! man_word_alloc(m, line, la, p))
			return(0);
	}

	assert(m);
	assert(tok != MAN_MAX);

	if ( ! rew_scope(MAN_HEAD, m, tok))
		return(0);
	return(man_body_alloc(m, line, ppos, tok));
}


/*
 * Parse an implicit-block macro.  These contain a MAN_HEAD and a
 * MAN_BODY contained within a MAN_BLOCK.  Rules for closing out other
 * scopes, such as `SH' closing out an `SS', are defined in the rew
 * routines.
 */
/* ARGSUSED */
int
blk_imp(MACRO_PROT_ARGS)
{
	int		 w, la;
	char		*p;
	struct man_node	*n;

	/* Close out prior scopes. */

	if ( ! rew_scope(MAN_BODY, m, tok))
		return(0);
	if ( ! rew_scope(MAN_BLOCK, m, tok))
		return(0);

	/* Allocate new block & head scope. */

	if ( ! man_block_alloc(m, line, ppos, tok))
		return(0);
	if ( ! man_head_alloc(m, line, ppos, tok))
		return(0);

	n = m->last;

	/* Add line arguments. */

	for (;;) {
		la = *pos;
		w = man_args(m, line, pos, buf, &p);

		if (-1 == w)
			return(0);
		if (0 == w)
			break;

		if ( ! man_word_alloc(m, line, la, p))
			return(0);
	}

	/* Close out head and open body (unless MAN_SCOPED). */

	if (MAN_SCOPED & man_macros[tok].flags) {
		/* If we're forcing scope (`TP'), keep it open. */
		if (MAN_FSCOPED & man_macros[tok].flags) {
			m->flags |= MAN_BLINE;
			return(1);
		} else if (n == m->last) {
			m->flags |= MAN_BLINE;
			return(1);
		}
	}

	if ( ! rew_scope(MAN_HEAD, m, tok))
		return(0);
	return(man_body_alloc(m, line, ppos, tok));
}
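
/*
 * For instance (informal sketch): with input
 *
 *	.TP
 *	tag
 *	text
 *
 * `TP' carries MAN_SCOPED | MAN_FSCOPED, so the head is left open and
 * MAN_BLINE is set; the following line ("tag") is gathered into the
 * head, and the body is opened once the saved block line is closed out
 * (handled outside this file).
 */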


/* ARGSUSED */
int
in_line_eoln(MACRO_PROT_ARGS)
{
	int		 w, la;
	char		*p;
	struct man_node	*n;

	if ( ! man_elem_alloc(m, line, ppos, tok))
		return(0);

	n = m->last;

	for (;;) {
		la = *pos;
		w = man_args(m, line, pos, buf, &p);

		if (-1 == w)
			return(0);
		if (0 == w)
			break;
		if ( ! man_word_alloc(m, line, la, p))
			return(0);
	}

	/*
	 * If no arguments are specified and this is MAN_SCOPED (i.e.,
	 * next-line scoped), then set our mode to indicate that we're
	 * waiting for terms to load into our context.
	 */

	if (n == m->last && MAN_SCOPED & man_macros[tok].flags) {
		assert( ! (MAN_NSCOPED & man_macros[tok].flags));
		m->flags |= MAN_ELINE;
		return(1);
	}

	/* Set ignorable context, if applicable. */

	if (MAN_NSCOPED & man_macros[tok].flags) {
		assert( ! (MAN_SCOPED & man_macros[tok].flags));
		m->flags |= MAN_ILINE;
	}

	/*
	 * Rewind our element scope.  Note that when TH is pruned, we'll
	 * be back at the root, so make sure that the next node is
	 * allocated as a child of the root rather than as its sibling.
	 */

	for ( ; m->last; m->last = m->last->parent) {
		if (m->last == n)
			break;
		if (m->last->type == MAN_ROOT)
			break;
		if ( ! man_valid_post(m))
			return(0);
		if ( ! man_action_post(m))
			return(0);
	}

	assert(m->last);

	/*
	 * Same here regarding whether we're back at the root.
	 */

	if (m->last->type != MAN_ROOT && ! man_valid_post(m))
		return(0);
	if (m->last->type != MAN_ROOT && ! man_action_post(m))
		return(0);

	m->next = MAN_ROOT == m->last->type ?
		MAN_NEXT_CHILD : MAN_NEXT_SIBLING;

	return(1);
}
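
/*
 * Sketch of the two element modes above (hypothetical input): ".B word"
 * receives its argument on the same line and its scope is closed right
 * here, whereas a bare ".B" has no arguments, so MAN_ELINE is set and
 * the next text line is pulled into the element's scope (handled
 * outside this file).
 */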


int
man_macroend(struct man *m)
{

	return(man_unscope(m, m->first, MANDOCERR_SCOPEEXIT));
}