1 /* libbdev - block device interfacing library, by D.C. van Moolenbroek */
2
3 #include <minix/drivers.h>
4 #include <minix/bdev.h>
5 #include <minix/ioctl.h>
6 #include <assert.h>
7
8 #include "const.h"
9 #include "type.h"
10 #include "proto.h"
11
void bdev_driver(dev_t dev, char *label)
{
  /* Associate a driver with the given (major) device, using its endpoint.
   * File system usage note: typically called from mount and newdriver.
   */
  static int initialized = FALSE;

  if (!initialized) {
    /* On the very first call, initialize the driver endpoint array. */
    bdev_driver_init();

    initialized = TRUE;
  }

  bdev_update(dev, label);
}
28
bdev_retry(int * driver_tries,int * transfer_tries,int * result)29 static int bdev_retry(int *driver_tries, int *transfer_tries, int *result)
30 {
31 /* Return TRUE iff the call result implies that we should retry the operation.
32 */
33
34 switch (*result) {
35 case ERESTART:
36 /* We get this error internally if the driver has restarted and the
37 * current operation may now go through. Check the retry count for
38 * driver restarts first, as we don't want to keep trying forever.
39 */
40 if (++*driver_tries < DRIVER_TRIES)
41 return TRUE;
42
43 *result = EDEADSRCDST;
44
45 break;
46
47 case EIO:
48 /* The 'transfer_tries' pointer is non-NULL if this was a transfer
49 * request. If we get back an I/O failure, keep retrying the request
50 * until we hit the transfer retry limit.
51 */
52 if (transfer_tries != NULL && ++*transfer_tries < TRANSFER_TRIES)
53 return TRUE;
54
55 break;
56 }
57
58 return FALSE;
59 }
60
static int bdev_opcl(int req, dev_t dev, int bits)
{
  /* Open or close the given minor device, retrying on driver restarts.
   */
  message m;
  int r, driver_tries = 0;

  for (;;) {
    /* Rebuild the request message on every attempt. */
    memset(&m, 0, sizeof(m));
    m.m_type = req;
    m.m_lbdev_lblockdriver_msg.minor = minor(dev);
    m.m_lbdev_lblockdriver_msg.access = bits;

    r = bdev_sendrec(dev, &m);

    if (!bdev_retry(&driver_tries, NULL, &r))
      break;
  }

  return r;
}
79
int bdev_open(dev_t dev, int bits)
{
  /* Open the given minor device with the given access bits.
   * File system usage note: typically called from mount, after bdev_driver.
   */
  int r = bdev_opcl(BDEV_OPEN, dev, bits);

  /* On success, start tracking the minor device and its access mode. */
  if (r == OK)
    bdev_minor_add(dev, bits);

  return r;
}
94
int bdev_close(dev_t dev)
{
  /* Close the given minor device.
   * File system usage note: typically called from unmount.
   */
  int r;

  /* Wait for any outstanding asynchronous I/O on this device first. */
  bdev_flush_asyn(dev);

  r = bdev_opcl(BDEV_CLOSE, dev, 0);

  /* On success, stop tracking the minor device. */
  if (r == OK)
    bdev_minor_del(dev);

  return r;
}
111
static int bdev_rdwt_setup(int req, dev_t dev, u64_t pos, char *buf,
  size_t count, int flags, message *m)
{
  /* Set up a single-buffer read/write request: allocate a memory grant on the
   * buffer for the device's driver, and fill 'm' with the request message.
   * Returns OK, EDEADSRCDST (no driver endpoint), or EINVAL (grant failure).
   */
  endpoint_t endpt;
  cp_grant_id_t grant;
  int access;

  assert((ssize_t) count >= 0);

  endpt = bdev_driver_get(dev);
  if (endpt == NONE)
    return EDEADSRCDST;

  /* For a read the driver writes into our buffer; for a write it only
   * reads from it.
   */
  access = (req == BDEV_READ) ? CPF_WRITE : CPF_READ;

  grant = cpf_grant_direct(endpt, (vir_bytes) buf, count, access);

  if (!GRANT_VALID(grant)) {
    printf("bdev: unable to allocate grant!\n");
    return EINVAL;
  }

  memset(m, 0, sizeof(*m));
  m->m_type = req;
  m->m_lbdev_lblockdriver_msg.minor = minor(dev);
  m->m_lbdev_lblockdriver_msg.pos = pos;
  m->m_lbdev_lblockdriver_msg.count = count;
  m->m_lbdev_lblockdriver_msg.grant = grant;
  m->m_lbdev_lblockdriver_msg.flags = flags;

  return OK;
}
145
bdev_rdwt_cleanup(const message * m)146 static void bdev_rdwt_cleanup(const message *m)
147 {
148 /* Clean up a single-buffer read/write request.
149 */
150
151 cpf_revoke(m->m_lbdev_lblockdriver_msg.grant);
152 }
153
bdev_rdwt(int req,dev_t dev,u64_t pos,char * buf,size_t count,int flags)154 static ssize_t bdev_rdwt(int req, dev_t dev, u64_t pos, char *buf,
155 size_t count, int flags)
156 {
157 /* Perform a synchronous read or write call using a single buffer.
158 */
159 message m;
160 int r, driver_tries = 0, transfer_tries = 0;
161
162 do {
163 if ((r = bdev_rdwt_setup(req, dev, pos, buf, count, flags, &m)) != OK)
164 break;
165
166 r = bdev_sendrec(dev, &m);
167
168 bdev_rdwt_cleanup(&m);
169 } while (bdev_retry(&driver_tries, &transfer_tries, &r));
170
171 return r;
172 }
173
static int bdev_vrdwt_setup(int req, dev_t dev, u64_t pos, iovec_t *vec,
  int count, int flags, message *m, iovec_s_t *gvec)
{
  /* Set up a vectored read/write request.
   *
   * Creates one direct grant per element of 'vec', storing them in 'gvec',
   * plus one grant covering 'gvec' itself, and fills 'm' with the request
   * message. On any grant failure, all grants created so far are revoked
   * again. Returns OK, EDEADSRCDST (no driver endpoint for 'dev'), or
   * EINVAL (grant allocation failure).
   */
  ssize_t size;
  endpoint_t endpt;
  cp_grant_id_t grant;
  int i, perm;

  assert(count <= NR_IOREQS);

  if ((endpt = bdev_driver_get(dev)) == NONE)
    return EDEADSRCDST;

  /* For a gather (read) the driver writes into our buffers; for a scatter
   * (write) it only reads from them.
   */
  perm = (req == BDEV_GATHER) ? CPF_WRITE : CPF_READ;
  size = 0;

  for (i = 0; i < count; i++) {
    grant = cpf_grant_direct(endpt, vec[i].iov_addr, vec[i].iov_size,
      perm);

    if (!GRANT_VALID(grant)) {
      printf("bdev: unable to allocate grant!\n");

      /* Roll back the grants created in earlier iterations. */
      for (i--; i >= 0; i--)
        cpf_revoke(gvec[i].iov_grant);

      return EINVAL;
    }

    gvec[i].iov_grant = grant;
    gvec[i].iov_size = vec[i].iov_size;

    /* Reject zero-sized elements and total-size (signed) overflow. */
    assert(vec[i].iov_size > 0);
    assert((ssize_t) (size + vec[i].iov_size) > size);

    size += vec[i].iov_size;
  }

  /* Grant the driver read access to the grant vector itself. */
  grant = cpf_grant_direct(endpt, (vir_bytes) gvec, sizeof(gvec[0]) * count,
    CPF_READ);

  if (!GRANT_VALID(grant)) {
    printf("bdev: unable to allocate grant!\n");

    /* Roll back all per-buffer grants. */
    for (i = count - 1; i >= 0; i--)
      cpf_revoke(gvec[i].iov_grant);

    return EINVAL;
  }

  memset(m, 0, sizeof(*m));
  m->m_type = req;
  m->m_lbdev_lblockdriver_msg.minor = minor(dev);
  m->m_lbdev_lblockdriver_msg.pos = pos;
  m->m_lbdev_lblockdriver_msg.count = count;
  m->m_lbdev_lblockdriver_msg.grant = grant;
  m->m_lbdev_lblockdriver_msg.flags = flags;

  return OK;
}
236
static void bdev_vrdwt_cleanup(const message *m, iovec_s_t *gvec)
{
  /* Clean up a vectored read/write request, revoking all grants that
   * bdev_vrdwt_setup created for it.
   */
  int i;

  /* First revoke the grant covering the grant vector itself. */
  cpf_revoke(m->m_lbdev_lblockdriver_msg.grant);

  /* Then revoke each per-buffer grant. */
  for (i = m->m_lbdev_lblockdriver_msg.count - 1; i >= 0; i--)
    cpf_revoke(gvec[i].iov_grant);
}
251
bdev_vrdwt(int req,dev_t dev,u64_t pos,iovec_t * vec,int count,int flags)252 static ssize_t bdev_vrdwt(int req, dev_t dev, u64_t pos, iovec_t *vec,
253 int count, int flags)
254 {
255 /* Perform a synchronous read or write call using a vector of buffers.
256 */
257 iovec_s_t gvec[NR_IOREQS];
258 message m;
259 int r, driver_tries = 0, transfer_tries = 0;
260
261 do {
262 if ((r = bdev_vrdwt_setup(req, dev, pos, vec, count, flags, &m,
263 gvec)) != OK)
264 break;
265
266 r = bdev_sendrec(dev, &m);
267
268 bdev_vrdwt_cleanup(&m, gvec);
269 } while (bdev_retry(&driver_tries, &transfer_tries, &r));
270
271 return r;
272 }
273
bdev_read(dev_t dev,u64_t pos,char * buf,size_t count,int flags)274 ssize_t bdev_read(dev_t dev, u64_t pos, char *buf, size_t count, int flags)
275 {
276 /* Perform a synchronous read call into a single buffer.
277 */
278
279 return bdev_rdwt(BDEV_READ, dev, pos, buf, count, flags);
280 }
281
bdev_write(dev_t dev,u64_t pos,char * buf,size_t count,int flags)282 ssize_t bdev_write(dev_t dev, u64_t pos, char *buf, size_t count, int flags)
283 {
284 /* Perform a synchronous write call from a single buffer.
285 */
286
287 return bdev_rdwt(BDEV_WRITE, dev, pos, buf, count, flags);
288 }
289
bdev_gather(dev_t dev,u64_t pos,iovec_t * vec,int count,int flags)290 ssize_t bdev_gather(dev_t dev, u64_t pos, iovec_t *vec, int count, int flags)
291 {
292 /* Perform a synchronous read call into a vector of buffers.
293 */
294
295 return bdev_vrdwt(BDEV_GATHER, dev, pos, vec, count, flags);
296 }
297
bdev_scatter(dev_t dev,u64_t pos,iovec_t * vec,int count,int flags)298 ssize_t bdev_scatter(dev_t dev, u64_t pos, iovec_t *vec, int count, int flags)
299 {
300 /* Perform a synchronous write call from a vector of buffers.
301 */
302
303 return bdev_vrdwt(BDEV_SCATTER, dev, pos, vec, count, flags);
304 }
305
static int bdev_ioctl_setup(dev_t dev, unsigned long request, void *buf,
  endpoint_t user_endpt, message *m)
{
  /* Set up an I/O control request: allocate a grant on the argument buffer
   * and fill 'm' with a BDEV_IOCTL request message. Returns OK, EDEADSRCDST
   * (no driver endpoint), or EINVAL (grant failure).
   */
  endpoint_t endpt;
  size_t size;
  cp_grant_id_t grant;
  int access;

  if ((endpt = bdev_driver_get(dev)) == NONE)
    return EDEADSRCDST;

  /* The argument size is encoded in the request code itself. */
  size = _MINIX_IOCTL_BIG(request) ?
    _MINIX_IOCTL_SIZE_BIG(request) : _MINIX_IOCTL_SIZE(request);

  /* Derive the grant permissions from the ioctl direction bits: an IOR
   * ioctl has the driver write into the buffer, an IOW ioctl has it read
   * from the buffer.
   */
  access = 0;
  if (_MINIX_IOCTL_IOR(request)) access |= CPF_WRITE;
  if (_MINIX_IOCTL_IOW(request)) access |= CPF_READ;

  /* The size may be 0, in which case 'buf' need not be a valid pointer. */
  grant = cpf_grant_direct(endpt, (vir_bytes) buf, size, access);

  if (!GRANT_VALID(grant)) {
    printf("bdev: unable to allocate grant!\n");
    return EINVAL;
  }

  memset(m, 0, sizeof(*m));
  m->m_type = BDEV_IOCTL;
  m->m_lbdev_lblockdriver_msg.minor = minor(dev);
  m->m_lbdev_lblockdriver_msg.request = request;
  m->m_lbdev_lblockdriver_msg.grant = grant;
  m->m_lbdev_lblockdriver_msg.user = user_endpt;

  return OK;
}
345
bdev_ioctl_cleanup(const message * m)346 static void bdev_ioctl_cleanup(const message *m)
347 {
348 /* Clean up an I/O control request.
349 */
350
351 cpf_revoke(m->m_lbdev_lblockdriver_msg.grant);
352 }
353
int bdev_ioctl(dev_t dev, unsigned long request, void *buf,
  endpoint_t user_endpt)
{
  /* Perform a synchronous I/O control request, retrying on driver restarts.
   */
  message m;
  int r, driver_tries = 0;

  for (;;) {
    r = bdev_ioctl_setup(dev, request, buf, user_endpt, &m);
    if (r != OK)
      break;

    r = bdev_sendrec(dev, &m);

    /* The grant must be recreated for each attempt, so revoke it here. */
    bdev_ioctl_cleanup(&m);

    if (!bdev_retry(&driver_tries, NULL, &r))
      break;
  }

  return r;
}
373
void bdev_flush_asyn(dev_t dev)
{
  /* Flush all ongoing asynchronous requests to the given minor device. This
   * involves blocking until all I/O for it has completed.
   * File system usage note: typically called from flush.
   */
  bdev_call_t *call;

  for (;;) {
    call = bdev_call_find(dev);
    if (call == NULL)
      break;

    (void) bdev_wait_asyn(call->id);
  }
}
385
bdev_rdwt_asyn(int req,dev_t dev,u64_t pos,char * buf,size_t count,int flags,bdev_callback_t callback,bdev_param_t param)386 static bdev_id_t bdev_rdwt_asyn(int req, dev_t dev, u64_t pos, char *buf,
387 size_t count, int flags, bdev_callback_t callback, bdev_param_t param)
388 {
389 /* Perform an asynchronous read or write call using a single buffer.
390 */
391 bdev_call_t *call;
392 int r;
393
394 if ((call = bdev_call_alloc(1)) == NULL)
395 return ENOMEM;
396
397 if ((r = bdev_rdwt_setup(req, dev, pos, buf, count, flags, &call->msg)) !=
398 OK) {
399 bdev_call_free(call);
400
401 return r;
402 }
403
404 if ((r = bdev_senda(dev, &call->msg, call->id)) != OK) {
405 bdev_rdwt_cleanup(&call->msg);
406
407 bdev_call_free(call);
408
409 return r;
410 }
411
412 call->dev = dev;
413 call->callback = callback;
414 call->param = param;
415 call->driver_tries = 0;
416 call->transfer_tries = 0;
417 call->vec[0].iov_addr = (vir_bytes) buf;
418 call->vec[0].iov_size = count;
419
420 return call->id;
421 }
422
bdev_vrdwt_asyn(int req,dev_t dev,u64_t pos,iovec_t * vec,int count,int flags,bdev_callback_t callback,bdev_param_t param)423 static bdev_id_t bdev_vrdwt_asyn(int req, dev_t dev, u64_t pos, iovec_t *vec,
424 int count, int flags, bdev_callback_t callback, bdev_param_t param)
425 {
426 /* Perform an asynchronous read or write call using a vector of buffers.
427 */
428 bdev_call_t *call;
429 int r;
430
431 if ((call = bdev_call_alloc(count)) == NULL)
432 return ENOMEM;
433
434 if ((r = bdev_vrdwt_setup(req, dev, pos, vec, count, flags, &call->msg,
435 call->gvec)) != OK) {
436 bdev_call_free(call);
437
438 return r;
439 }
440
441 if ((r = bdev_senda(dev, &call->msg, call->id)) != OK) {
442 bdev_vrdwt_cleanup(&call->msg, call->gvec);
443
444 bdev_call_free(call);
445
446 return r;
447 }
448
449 call->dev = dev;
450 call->callback = callback;
451 call->param = param;
452 call->driver_tries = 0;
453 call->transfer_tries = 0;
454 memcpy(call->vec, vec, sizeof(vec[0]) * count);
455
456 return call->id;
457 }
458
bdev_read_asyn(dev_t dev,u64_t pos,char * buf,size_t count,int flags,bdev_callback_t callback,bdev_param_t param)459 bdev_id_t bdev_read_asyn(dev_t dev, u64_t pos, char *buf, size_t count,
460 int flags, bdev_callback_t callback, bdev_param_t param)
461 {
462 /* Perform an asynchronous read call into a single buffer.
463 */
464
465 return bdev_rdwt_asyn(BDEV_READ, dev, pos, buf, count, flags, callback,
466 param);
467 }
468
bdev_write_asyn(dev_t dev,u64_t pos,char * buf,size_t count,int flags,bdev_callback_t callback,bdev_param_t param)469 bdev_id_t bdev_write_asyn(dev_t dev, u64_t pos, char *buf, size_t count,
470 int flags, bdev_callback_t callback, bdev_param_t param)
471 {
472 /* Perform an asynchronous write call from a single buffer.
473 */
474
475 return bdev_rdwt_asyn(BDEV_WRITE, dev, pos, buf, count, flags, callback,
476 param);
477 }
478
bdev_gather_asyn(dev_t dev,u64_t pos,iovec_t * vec,int count,int flags,bdev_callback_t callback,bdev_param_t param)479 bdev_id_t bdev_gather_asyn(dev_t dev, u64_t pos, iovec_t *vec, int count,
480 int flags, bdev_callback_t callback, bdev_param_t param)
481 {
482 /* Perform an asynchronous read call into a vector of buffers.
483 */
484
485 return bdev_vrdwt_asyn(BDEV_GATHER, dev, pos, vec, count, flags, callback,
486 param);
487 }
488
bdev_scatter_asyn(dev_t dev,u64_t pos,iovec_t * vec,int count,int flags,bdev_callback_t callback,bdev_param_t param)489 bdev_id_t bdev_scatter_asyn(dev_t dev, u64_t pos, iovec_t *vec, int count,
490 int flags, bdev_callback_t callback, bdev_param_t param)
491 {
492 /* Perform an asynchronous write call into a vector of buffers.
493 */
494
495 return bdev_vrdwt_asyn(BDEV_SCATTER, dev, pos, vec, count, flags, callback,
496 param);
497 }
498
bdev_ioctl_asyn(dev_t dev,unsigned long request,void * buf,endpoint_t user_endpt,bdev_callback_t callback,bdev_param_t param)499 bdev_id_t bdev_ioctl_asyn(dev_t dev, unsigned long request, void *buf,
500 endpoint_t user_endpt, bdev_callback_t callback, bdev_param_t param)
501 {
502 /* Perform an asynchronous I/O control request.
503 */
504 bdev_call_t *call;
505 int r;
506
507 if ((call = bdev_call_alloc(1)) == NULL)
508 return ENOMEM;
509
510 if ((r = bdev_ioctl_setup(dev, request, buf, user_endpt,
511 &call->msg)) != OK) {
512 bdev_call_free(call);
513
514 return r;
515 }
516
517 if ((r = bdev_senda(dev, &call->msg, call->id)) != OK) {
518 bdev_ioctl_cleanup(&call->msg);
519
520 bdev_call_free(call);
521
522 return r;
523 }
524
525 call->dev = dev;
526 call->callback = callback;
527 call->param = param;
528 call->driver_tries = 0;
529 call->vec[0].iov_addr = (vir_bytes) buf;
530
531 return call->id;
532 }
533
void bdev_callback_asyn(bdev_call_t *call, int result)
{
  /* Perform the callback for an asynchronous request, with the given result.
   * Clean up the call structure afterwards.
   *
   * Exception: if the request is a transfer that failed with EIO and the
   * transfer retry limit has not been hit yet, the request is resent instead,
   * and no callback takes place (yet).
   */

  /* If this was a transfer request and the result is EIO, we may want to retry
   * the request first.
   */
  switch (call->msg.m_type) {
  case BDEV_READ:
  case BDEV_WRITE:
  case BDEV_GATHER:
  case BDEV_SCATTER:
    /* Resend the original message as is; its grants are still valid.
     * Only if resending itself fails do we fall through to cleanup,
     * with the send error as the new result.
     */
    if (result == EIO && ++call->transfer_tries < TRANSFER_TRIES) {
      result = bdev_senda(call->dev, &call->msg, call->id);

      if (result == OK)
        return;
    }
  }

  /* Clean up: revoke the grant(s) associated with the request type. */
  switch (call->msg.m_type) {
  case BDEV_READ:
  case BDEV_WRITE:
    bdev_rdwt_cleanup(&call->msg);

    break;

  case BDEV_GATHER:
  case BDEV_SCATTER:
    bdev_vrdwt_cleanup(&call->msg, call->gvec);

    break;

  case BDEV_IOCTL:
    bdev_ioctl_cleanup(&call->msg);

    break;

  default:
    /* No other request types are ever sent asynchronously. */
    assert(0);
  }

  /* Call the callback function. */
  /* FIXME: we assume all reasonable ssize_t values can be stored in an int. */
  call->callback(call->dev, call->id, call->param, result);

  /* Free up the call structure. */
  bdev_call_free(call);
}
586
int bdev_restart_asyn(bdev_call_t *call)
{
  /* The driver for the given call has restarted, and may now have a new
   * endpoint. Recreate and resend the request for the given call.
   *
   * Returns OK if the request was resent, EDEADSRCDST if the driver-restart
   * retry limit was hit, or another error from setup or sending.
   */
  int type, r = OK;

  /* Update and check the retry limit for driver restarts first. */
  if (++call->driver_tries >= DRIVER_TRIES)
    return EDEADSRCDST;

  /* Recreate all grants for the new endpoint. Each branch first revokes the
   * grants made for the old endpoint, then rebuilds the message in place.
   * The rebuilt request parameters (pos, count, flags, request, user) are
   * taken from the old message before it is overwritten; the buffer
   * address(es) come from the saved 'vec' copy in the call structure.
   */
  type = call->msg.m_type;

  switch (type) {
  case BDEV_READ:
  case BDEV_WRITE:
    bdev_rdwt_cleanup(&call->msg);

    r = bdev_rdwt_setup(type, call->dev,
      call->msg.m_lbdev_lblockdriver_msg.pos,
      (char *) call->vec[0].iov_addr, call->msg.m_lbdev_lblockdriver_msg.count,
      call->msg.m_lbdev_lblockdriver_msg.flags, &call->msg);

    break;

  case BDEV_GATHER:
  case BDEV_SCATTER:
    bdev_vrdwt_cleanup(&call->msg, call->gvec);

    r = bdev_vrdwt_setup(type, call->dev,
      call->msg.m_lbdev_lblockdriver_msg.pos,
      call->vec, call->msg.m_lbdev_lblockdriver_msg.count, call->msg.m_lbdev_lblockdriver_msg.flags,
      &call->msg, call->gvec);

    break;

  case BDEV_IOCTL:
    bdev_ioctl_cleanup(&call->msg);

    r = bdev_ioctl_setup(call->dev, call->msg.m_lbdev_lblockdriver_msg.request,
      (char *) call->vec[0].iov_addr, call->msg.m_lbdev_lblockdriver_msg.user,
      &call->msg);

    break;

  default:
    /* No other request types are ever sent asynchronously. */
    assert(0);
  }

  if (r != OK)
    return r;

  /* Try to resend the request. */
  return bdev_senda(call->dev, &call->msg, call->id);
}
643