xref: /openbsd-src/usr.bin/mg/undo.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /* $OpenBSD: undo.c,v 1.55 2014/03/20 07:47:29 lum Exp $ */
2 /*
3  * This file is in the public domain
4  */
5 
6 #include "def.h"
7 #include "kbd.h"
8 
9 #define MAX_FREE_RECORDS	32
10 
11 /*
12  * Local variables
13  */
14 static struct undoq		 undo_free;
15 static int			 undo_free_num;
16 static int			 boundary_flag = TRUE;
17 static int			 undo_enable_flag = TRUE;
18 
19 /*
20  * Local functions
21  */
22 static int find_dot(struct line *, int);
23 static int find_lo(int, struct line **, int *, int *);
24 static struct undo_rec *new_undo_record(void);
25 static int drop_oldest_undo_record(void);
26 
27 /*
28  * find_dot, find_lo()
29  *
30  * Find an absolute dot in the buffer from a line/offset pair, and vice-versa.
31  *
32  * Since lines can be deleted while they are referenced by undo record, we
33  * need to have an absolute dot to have something reliable.
34  */
35 static int
36 find_dot(struct line *lp, int off)
37 {
38 	int	 count = 0;
39 	struct line	*p;
40 
41 	for (p = curbp->b_headp; p != lp; p = lforw(p)) {
42 		if (count != 0) {
43 			if (p == curbp->b_headp) {
44 				dobeep();
45 				ewprintf("Error: Undo stuff called with a"
46 				    "nonexistent line");
47 				return (FALSE);
48 			}
49 		}
50 		count += llength(p) + 1;
51 	}
52 	count += off;
53 
54 	return (count);
55 }
56 
57 static int
58 find_lo(int pos, struct line **olp, int *offset, int *lnum)
59 {
60 	struct line *p;
61 	int lineno;
62 
63 	p = curbp->b_headp;
64 	lineno = 0;
65 	while (pos > llength(p)) {
66 		pos -= llength(p) + 1;
67 		if ((p = lforw(p)) == curbp->b_headp) {
68 			*olp = NULL;
69 			*offset = 0;
70 			return (FALSE);
71 		}
72 		lineno++;
73 	}
74 	*olp = p;
75 	*offset = pos;
76 	*lnum = lineno;
77 
78 	return (TRUE);
79 }
80 
81 static struct undo_rec *
82 new_undo_record(void)
83 {
84 	struct undo_rec *rec;
85 
86 	rec = TAILQ_FIRST(&undo_free);
87 	if (rec != NULL) {
88 		/* Remove it from the free-list */
89 		TAILQ_REMOVE(&undo_free, rec, next);
90 		undo_free_num--;
91 	} else {
92 		if ((rec = malloc(sizeof(*rec))) == NULL)
93 			panic("Out of memory in undo code (record)");
94 	}
95 	memset(rec, 0, sizeof(struct undo_rec));
96 
97 	return (rec);
98 }
99 
100 void
101 free_undo_record(struct undo_rec *rec)
102 {
103 	static int initialised = 0;
104 
105 	/*
106 	 * On the first run, do initialisation of the free list.
107 	 */
108 	if (initialised == 0) {
109 		TAILQ_INIT(&undo_free);
110 		initialised = 1;
111 	}
112 	if (rec->content != NULL) {
113 		free(rec->content);
114 		rec->content = NULL;
115 	}
116 	if (undo_free_num >= MAX_FREE_RECORDS) {
117 		free(rec);
118 		return;
119 	}
120 	undo_free_num++;
121 
122 	TAILQ_INSERT_HEAD(&undo_free, rec, next);
123 }
124 
/*
 * Drop the oldest undo record in our list. Return 1 if we could remove it,
 * 0 if the undo list was empty.
 *
 * Called from undo_add_delete() to reclaim memory when malloc() fails.
 */
static int
drop_oldest_undo_record(void)
{
	struct undo_rec *rec;

	rec = TAILQ_LAST(&curbp->b_undo, undoq);
	if (rec != NULL) {
		/*
		 * NOTE(review): undo_free_num counts free-list entries,
		 * yet it is decremented here for a record coming off the
		 * buffer's undo list before free_undo_record() may
		 * re-increment it.  Presumably this pre-compensation is
		 * meant to bias free_undo_record() toward caching less /
		 * releasing memory -- confirm the intent; the counter can
		 * go negative.
		 */
		undo_free_num--;
		TAILQ_REMOVE(&curbp->b_undo, rec, next);
		free_undo_record(rec);
		return (1);
	}
	return (0);
}
143 
144 static int
145 lastrectype(void)
146 {
147 	struct undo_rec *rec;
148 
149 	if ((rec = TAILQ_FIRST(&curbp->b_undo)) != NULL)
150 		return (rec->type);
151 	return (0);
152 }
153 
154 /*
155  * Returns TRUE if undo is enabled, FALSE otherwise.
156  */
157 int
158 undo_enabled(void)
159 {
160 	return (undo_enable_flag);
161 }
162 
163 /*
164  * undo_enable: toggle undo_enable.
165  * Returns the previous value of the flag.
166  */
167 int
168 undo_enable(int f, int n)
169 {
170 	int pon = undo_enable_flag;
171 
172 	if (f & (FFARG | FFRAND))
173 		undo_enable_flag = n > 0;
174 	else
175 		undo_enable_flag = !undo_enable_flag;
176 
177 	if (!(f & FFRAND))
178 		ewprintf("Undo %sabled", undo_enable_flag ? "en" : "dis");
179 
180 	return (pon);
181 }
182 
183 /*
184  * If undo is enabled, then:
185  *  Toggle undo boundary recording.
186  *  If called with an argument, (n > 0) => enable. Otherwise disable.
187  * In either case, add an undo boundary
188  * If undo is disabled, this function has no effect.
189  */
190 int
191 undo_boundary_enable(int f, int n)
192 {
193 	int bon = boundary_flag;
194 
195 	if (!undo_enable_flag)
196 		return (FALSE);
197 
198 	undo_add_boundary(FFRAND, 1);
199 
200 	if (f & (FFARG | FFRAND))
201 		boundary_flag = n > 0;
202 	else
203 		boundary_flag = !boundary_flag;
204 
205 	if (!(f & FFRAND))
206 		ewprintf("Undo boundaries %sabled",
207 		    boundary_flag ? "en" : "dis");
208 
209 	return (bon);
210 }
211 
212 /*
213  * Record an undo boundary, unless boundary_flag == FALSE.
214  * Does nothing if previous undo entry is already a boundary or 'modified' flag.
215  */
216 int
217 undo_add_boundary(int f, int n)
218 {
219 	struct undo_rec *rec;
220 	int last;
221 
222 	if (boundary_flag == FALSE)
223 		return (FALSE);
224 
225 	last = lastrectype();
226 	if (last == BOUNDARY || last == MODIFIED)
227 		return (TRUE);
228 
229 	rec = new_undo_record();
230 	rec->type = BOUNDARY;
231 
232 	TAILQ_INSERT_HEAD(&curbp->b_undo, rec, next);
233 
234 	return (TRUE);
235 }
236 
237 /*
238  * Record an undo "modified" boundary
239  */
240 void
241 undo_add_modified(void)
242 {
243 	struct undo_rec *rec, *trec;
244 
245 	TAILQ_FOREACH_SAFE(rec, &curbp->b_undo, next, trec)
246 		if (rec->type == MODIFIED) {
247 			TAILQ_REMOVE(&curbp->b_undo, rec, next);
248 			free_undo_record(rec);
249 		}
250 
251 	rec = new_undo_record();
252 	rec->type = MODIFIED;
253 
254 	TAILQ_INSERT_HEAD(&curbp->b_undo, rec, next);
255 
256 	return;
257 }
258 
259 int
260 undo_add_insert(struct line *lp, int offset, int size)
261 {
262 	struct region	reg;
263 	struct	undo_rec *rec;
264 	int	pos;
265 
266 	if (!undo_enable_flag)
267 		return (TRUE);
268 	reg.r_linep = lp;
269 	reg.r_offset = offset;
270 	reg.r_size = size;
271 
272 	pos = find_dot(lp, offset);
273 
274 	/*
275 	 * We try to reuse the last undo record to `compress' things.
276 	 */
277 	rec = TAILQ_FIRST(&curbp->b_undo);
278 	if (rec != NULL && rec->type == INSERT) {
279 		if (rec->pos + rec->region.r_size == pos) {
280 			rec->region.r_size += reg.r_size;
281 			return (TRUE);
282 		}
283 	}
284 
285 	/*
286 	 * We couldn't reuse the last undo record, so prepare a new one.
287 	 */
288 	rec = new_undo_record();
289 	rec->pos = pos;
290 	rec->type = INSERT;
291 	memmove(&rec->region, &reg, sizeof(struct region));
292 	rec->content = NULL;
293 
294 	undo_add_boundary(FFRAND, 1);
295 
296 	TAILQ_INSERT_HEAD(&curbp->b_undo, rec, next);
297 
298 	return (TRUE);
299 }
300 
/*
 * Record the deletion of `size' bytes at lp/offset; isreg != 0 marks
 * a region (kill) deletion, stored as DELREG instead of DELETE.  The
 * doomed text is copied into the record, so this of course must be
 * done _before_ the actual deletion is done.
 */
int
undo_add_delete(struct line *lp, int offset, int size, int isreg)
{
	struct region	reg;
	struct	undo_rec *rec;
	int	pos;

	if (!undo_enable_flag)
		return (TRUE);

	reg.r_linep = lp;
	reg.r_offset = offset;
	reg.r_size = size;

	pos = find_dot(lp, offset);

	if (offset == llength(lp))	/* if it's a newline... */
		undo_add_boundary(FFRAND, 1);
	else if ((rec = TAILQ_FIRST(&curbp->b_undo)) != NULL) {
		/*
		 * Separate this command from the previous one if we're not
		 * just before the previous record...
		 */
		if (!isreg && rec->type == DELETE) {
			if (rec->pos - rec->region.r_size != pos)
				undo_add_boundary(FFRAND, 1);
		}
	}
	rec = new_undo_record();
	rec->pos = pos;
	if (isreg)
		rec->type = DELREG;
	else
		rec->type = DELETE;
	memmove(&rec->region, &reg, sizeof(struct region));
	/*
	 * Save a copy of the text about to go away; when memory is
	 * tight, drop old undo records until the allocation succeeds.
	 */
	do {
		rec->content = malloc(reg.r_size + 1);
	} while ((rec->content == NULL) && drop_oldest_undo_record());

	if (rec->content == NULL)
		panic("Out of memory");

	region_get_data(&reg, rec->content, reg.r_size);

	/* Consecutive single-character deletes coalesce into one action. */
	if (isreg || lastrectype() != DELETE)
		undo_add_boundary(FFRAND, 1);

	TAILQ_INSERT_HEAD(&curbp->b_undo, rec, next);

	return (TRUE);
}
355 
356 /*
357  * This of course must be called before the change takes place.
358  */
359 int
360 undo_add_change(struct line *lp, int offset, int size)
361 {
362 	if (!undo_enable_flag)
363 		return (TRUE);
364 	undo_add_boundary(FFRAND, 1);
365 	boundary_flag = FALSE;
366 	undo_add_delete(lp, offset, size, 0);
367 	undo_add_insert(lp, offset, size);
368 	boundary_flag = TRUE;
369 	undo_add_boundary(FFRAND, 1);
370 
371 	return (TRUE);
372 }
373 
/*
 * Show the undo records for the current buffer in a new buffer.
 */
/* ARGSUSED */
int
undo_dump(int f, int n)
{
	struct	 undo_rec *rec;
	struct buffer	*bp;
	struct mgwin	*wp;
	char	 buf[4096], tmp[1024];
	int	 num;

	/*
	 * Prepare the buffer for insertion.
	 */
	if ((bp = bfind("*undo*", TRUE)) == NULL)
		return (FALSE);
	bp->b_flag |= BFREADONLY;
	bclear(bp);
	if ((wp = popbuf(bp, WNONE)) == NULL)
		return (FALSE);

	/* Reset dot in every window displaying the *undo* buffer. */
	for (wp = wheadp; wp != NULL; wp = wp->w_wndp) {
		if (wp->w_bufp == bp) {
			wp->w_dotp = bp->b_headp;
			wp->w_doto = 0;
		}
	}

	/* Emit one line per record, newest record first. */
	num = 0;
	TAILQ_FOREACH(rec, &curbp->b_undo, next) {
		num++;
		snprintf(buf, sizeof(buf),
		    "%d:\t %s at %d ", num,
		    (rec->type == DELETE) ? "DELETE":
		    (rec->type == DELREG) ? "DELREGION":
		    (rec->type == INSERT) ? "INSERT":
		    (rec->type == BOUNDARY) ? "----" :
		    (rec->type == MODIFIED) ? "MODIFIED": "UNKNOWN",
		    rec->pos);

		if (rec->content) {
			/* Quote the text saved by DELETE/DELREG records. */
			(void)strlcat(buf, "\"", sizeof(buf));
			snprintf(tmp, sizeof(tmp), "%.*s", rec->region.r_size,
			    rec->content);
			(void)strlcat(buf, tmp, sizeof(buf));
			(void)strlcat(buf, "\"", sizeof(buf));
		}
		snprintf(tmp, sizeof(tmp), " [%d]", rec->region.r_size);
		if (strlcat(buf, tmp, sizeof(buf)) >= sizeof(buf)) {
			dobeep();
			ewprintf("Undo record too large. Aborted.");
			return (FALSE);
		}
		addlinef(bp, "%s", buf);
	}
	/* Park dot just past the last record and force a full redraw. */
	for (wp = wheadp; wp != NULL; wp = wp->w_wndp) {
		if (wp->w_bufp == bp) {
			wp->w_dotline = num+1;
			wp->w_rflag |= WFFULL;
		}
	}
	return (TRUE);
}
439 
440 /*
441  * After the user did action1, then action2, then action3:
442  *
443  *	[action3] <--- Undoptr
444  *	[action2]
445  *	[action1]
446  *	 ------
447  *	 [undo]
448  *
449  * After undo:
450  *
451  *	[undo of action3]
452  *	[action2] <--- Undoptr
453  *	[action1]
454  *	 ------
455  *	 [undo]
456  *
457  * After another undo:
458  *
459  *
460  *	[undo of action2]
461  *	[undo of action3]
462  *	[action1]  <--- Undoptr
463  *	 ------
464  *	 [undo]
465  *
466  * Note that the "undo of actionX" have no special meaning. Only when
467  * we undo a deletion, the insertion will be recorded just as if it
468  * was typed on the keyboard. Resulting in the inverse operation being
469  * saved in the list.
470  *
471  * If undoptr reaches the bottom of the list, or if we moved between
472  * two undo actions, we make it point back at the topmost record. This is
473  * how we handle redoing.
474  */
/* ARGSUSED */
/*
 * Undo the last n actions in the current buffer, walking
 * curbp->b_undoptr down the undo list.  See the block comment above
 * for how redo falls out of wrapping the pointer back to the top.
 */
int
undo(int f, int n)
{
	struct undo_rec	*ptr, *nptr;
	int		 done, rval;
	struct line	*lp;
	int		 offset, save;
	static int	 nulled = FALSE;	/* b_undoptr hit list end last time */
	int		 lineno;

	if (n < 0)
		return (FALSE);

	ptr = curbp->b_undoptr;

	/* first invocation, make ptr point back to the top of the list */
	if ((ptr == NULL && nulled == TRUE) ||  rptcount == 0) {
		ptr = TAILQ_FIRST(&curbp->b_undo);
		nulled = TRUE;
	}

	rval = TRUE;
	while (n--) {
		/* if we have a spurious boundary, free it and move on.... */
		while (ptr && ptr->type == BOUNDARY) {
			nptr = TAILQ_NEXT(ptr, next);
			TAILQ_REMOVE(&curbp->b_undo, ptr, next);
			free_undo_record(ptr);
			ptr = nptr;
		}
		/*
		 * Ptr is NULL, but on the next run, it will point to the
		 * top again, redoing all stuff done in the buffer since
		 * its creation.
		 */
		if (ptr == NULL) {
			dobeep();
			ewprintf("No further undo information");
			rval = FALSE;
			nulled = TRUE;
			break;
		}
		nulled = FALSE;

		/*
		 * Loop while we don't get a boundary specifying we've
		 * finished the current action...
		 */

		undo_add_boundary(FFRAND, 1);

		/* Suppress boundaries while replaying; restore below. */
		save = boundary_flag;
		boundary_flag = FALSE;

		done = 0;
		do {
			/*
			 * Move to where this has to apply
			 *
			 * Boundaries (and the modified flag)  are put as
			 * position 0 (to save lookup time in find_dot)
			 * so we must not move there...
			 */
			if (ptr->type != BOUNDARY && ptr->type != MODIFIED) {
				if (find_lo(ptr->pos, &lp,
				    &offset, &lineno) == FALSE) {
					dobeep();
					ewprintf("Internal error in Undo!");
					rval = FALSE;
					break;
				}
				curwp->w_dotp = lp;
				curwp->w_doto = offset;
				curwp->w_markline = curwp->w_dotline;
				curwp->w_dotline = lineno;
			}

			/*
			 * Do operation^-1
			 */
			switch (ptr->type) {
			case INSERT:
				/* Undo an insertion by deleting it. */
				ldelete(ptr->region.r_size, KNONE);
				break;
			case DELETE:
				/*
				 * Undo a deletion by re-inserting the
				 * saved text, keeping dot where it was.
				 */
				lp = curwp->w_dotp;
				offset = curwp->w_doto;
				region_put_data(ptr->content,
				    ptr->region.r_size);
				curwp->w_dotp = lp;
				curwp->w_doto = offset;
				break;
			case DELREG:
				region_put_data(ptr->content,
				    ptr->region.r_size);
				break;
			case BOUNDARY:
				/* End of this undo action. */
				done = 1;
				break;
			case MODIFIED:
				/* Buffer is back to its saved state. */
				curbp->b_flag &= ~BFCHG;
				break;
			default:
				break;
			}

			/* And move to next record */
			ptr = TAILQ_NEXT(ptr, next);
		} while (ptr != NULL && !done);

		boundary_flag = save;
		undo_add_boundary(FFRAND, 1);

		ewprintf("Undo!");
	}

	curbp->b_undoptr = ptr;

	return (rval);
}
596