#include <u.h>
#include <libc.h>
#include <auth.h>
#include <fcall.h>
#include "dat.h"
#include "fns.h"

/*
 * We used to use 100 i/o buffers of size 2kb (Sectorsize).
 * Unfortunately, reading 2kb at a time while hopping around
 * the disk doesn't let us get near the disk bandwidth.
 *
 * Based on a trace of iobuf address accesses taken while
 * tarring up a Plan 9 distribution CD, we now use 16 128kb
 * buffers.  This works for ISO9660 because data is required
 * to be laid out contiguously; effectively we're doing aggressive
 * readahead.  Because the buffers are so big and the typical
 * disk accesses so concentrated, it's okay that we have so few
 * of them.
 *
 * If this is used to access multiple discs at once, it's not clear
 * how gracefully the scheme degrades, but I'm not convinced
 * it's worth worrying about.		-rsc
 */

#define	BUFPERCLUST	64	/* 64*Sectorsize = 128kb */
#define	NCLUST		16
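/* total cache: NCLUST*BUFPERCLUST*Sectorsize = 16*64*2kb = 2mb */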

int nclust = NCLUST;

static Ioclust*	iohead;
static Ioclust*	iotail;
static Ioclust*	getclust(Xdata*, long);
static void	putclust(Ioclust*);
static void	xread(Ioclust*);

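/*
 * Carve one sbrk arena into nclust cluster headers, each followed
 * by its BUFPERCLUST Iobuf headers and one contiguous 128kb data
 * area, and thread the clusters onto the iohead/iotail list.
 */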
void
iobuf_init(void)
{
	int i, j, n;
	Ioclust *c;
	Iobuf *b;
	uchar *mem;

	n = nclust*sizeof(Ioclust) +
		nclust*BUFPERCLUST*(sizeof(Iobuf)+Sectorsize);
	mem = sbrk(n);
	if(mem == (void*)-1)
		panic(0, "iobuf_init");
	memset(mem, 0, n);

	for(i=0; i<nclust; i++){
		c = (Ioclust*)mem;
		mem += sizeof(Ioclust);
		c->addr = -1;
		c->prev = iotail;
		if(iotail)
			iotail->next = c;
		iotail = c;
		if(iohead == nil)
			iohead = c;

		c->buf = (Iobuf*)mem;
		mem += BUFPERCLUST*sizeof(Iobuf);
		c->iobuf = mem;
		mem += BUFPERCLUST*Sectorsize;
		for(j=0; j<BUFPERCLUST; j++){
			b = &c->buf[j];
			b->clust = c;
			b->addr = -1;
			b->iobuf = c->iobuf+j*Sectorsize;
		}
	}
}

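/*
 * Invalidate every cached cluster belonging to dev;
 * the next access will reread from disk.
 */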
void
purgebuf(Xdata *dev)
{
	Ioclust *p;

	for(p=iohead; p!=nil; p=p->next)
		if(p->dev == dev){
			p->addr = -1;
			p->busy = 0;
		}
}

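/*
 * Find the cluster caching addr, or load it into the least
 * recently used free cluster.  The scan from iohead leaves f
 * pointing at the non-busy cluster nearest iotail; since
 * putclust relinks released clusters at the head, the tail
 * end holds the best candidates for reuse.
 */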
static Ioclust*
getclust(Xdata *dev, long addr)
{
	Ioclust *c, *f;

	f = nil;
	for(c=iohead; c; c=c->next){
		if(!c->busy)
			f = c;
		if(c->addr == addr && c->dev == dev){
			c->busy++;
			return c;
		}
	}

	if(f == nil)
		panic(0, "out of buffers");

	f->addr = addr;
	f->dev = dev;
	f->busy++;
	if(waserror()){
		f->addr = -1;	/* stop caching */
		putclust(f);
		nexterror();
	}
	xread(f);
	poperror();
	return f;
}

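/*
 * Release one reference to a cluster and mark it
 * most recently used.
 */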
static void
putclust(Ioclust *c)
{
	if(c->busy <= 0)
		panic(0, "putbuf");
	c->busy--;

	/* link onto head: most recently used; reuse prefers the tail */
	if(c == iohead)
		return;
	c->prev->next = c->next;

	if(c->next)
		c->next->prev = c->prev;
	else
		iotail = c->prev;

	c->prev = nil;
	c->next = iohead;
	iohead->prev = c;
	iohead = c;
}

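/*
 * Fetch the sector at addr: it lives at offset addr%BUFPERCLUST
 * within the cluster starting at the aligned address addr-off.
 * Callers pair each getbuf with a putbuf:
 *
 *	b = getbuf(dev, addr);
 *	... use b->iobuf ...
 *	putbuf(b);
 */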
Iobuf*
getbuf(Xdata *dev, long addr)
{
	int off;
	Ioclust *c;

	off = addr%BUFPERCLUST;
	c = getclust(dev, addr - off);
	if(off >= c->nbuf){	/* valid sectors are 0..nbuf-1 */
		c->busy--;
		error("I/O read error");
	}
	return &c->buf[off];
}

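/*
 * Release a buffer; references are counted per cluster,
 * not per sector.
 */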
void
putbuf(Iobuf *b)
{
	putclust(b->clust);
}

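/*
 * Read an entire cluster from disk.  A short read near the end
 * of the disc is fine as long as at least one sector arrives;
 * nbuf records how many sectors are valid.
 */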
static void
xread(Ioclust *c)
{
	int n;
	vlong addr;
	Xdata *dev;

	dev = c->dev;
	addr = c->addr;
	seek(dev->dev, addr*Sectorsize, 0);
	n = readn(dev->dev, c->iobuf, BUFPERCLUST*Sectorsize);
	if(n < Sectorsize)
		error("I/O read error");
	c->nbuf = n/Sectorsize;
}