1 /* VirtualBox driver - by D.C. van Moolenbroek */
2 #include <machine/vmparam.h>
3
4 #include <minix/drivers.h>
5 #include <minix/vboxtype.h>
6 #include <minix/vboxif.h>
7 #include <assert.h>
8
9 #include "vmmdev.h"
10 #include "proto.h"
11
12 #define MAX_CONNS 4 /* maximum number of HGCM connections */
13 #define MAX_REQS 2 /* number of concurrent requests per conn. */
14 #define MAX_PARAMS 8 /* maximum number of parameters per request */
15
16 /* HGCM connection states. */
enum {
	STATE_FREE,		/* connection slot is not in use */
	STATE_OPENING,		/* a connect request is in progress */
	STATE_OPEN,		/* the connection is established */
	STATE_CLOSING		/* a disconnect request is in progress */
};
23
24 /* HGCM connection information. */
static struct {
	int state;				/* connection state (STATE_*) */
	endpoint_t endpt;			/* caller endpoint; note: not
						 * cleared when the slot is
						 * freed */
	u32_t client_id;			/* VMMDev-given client ID */
	struct {
		int busy;			/* is this request ongoing? */
		struct VMMDevHGCMHeader *ptr;	/* request buffer */
		phys_bytes addr;		/* buffer's physical address */

		int status;			/* IPC status of request */
		long id;			/* request ID */

		cp_grant_id_t grant;		/* grant for parameters */
		int count;			/* number of parameters */
		vbox_param_t param[MAX_PARAMS];	/* local copy of parameters */
	} req[MAX_REQS];			/* concurrent requests */
} hgcm_conn[MAX_CONNS];
42
43 /*===========================================================================*
44 * convert_result *
45 *===========================================================================*/
convert_result(int res)46 static int convert_result(int res)
47 {
48 /* Convert a VirtualBox result code to a POSIX error code.
49 */
50
51 /* HGCM transport error codes. */
52 switch (res) {
53 case VMMDEV_ERR_HGCM_NOT_FOUND: return ESRCH;
54 case VMMDEV_ERR_HGCM_DENIED: return EPERM;
55 case VMMDEV_ERR_HGCM_INVALID_ADDR: return EFAULT;
56 case VMMDEV_ERR_HGCM_ASYNC_EXEC: return EDONTREPLY;
57 case VMMDEV_ERR_HGCM_INTERNAL: return EGENERIC;
58 case VMMDEV_ERR_HGCM_INVALID_ID: return EINVAL;
59 }
60
61 /* Positive codes are success codes. */
62 if (res >= 0)
63 return OK;
64
65 /* Unsupported negative codes are translated to EGENERIC; it is up to
66 * the caller to check the actual VirtualBox result code in that case.
67 */
68 return convert_err(res);
69 }
70
71 /*===========================================================================*
72 * send_reply *
73 *===========================================================================*/
static void send_reply(endpoint_t endpt, int ipc_status, int result, int code,
	long id)
{
	/* Reply to a request that was received earlier. A SENDREC caller is
	 * answered with a nonblocking send; otherwise, an asynchronous reply
	 * is used.
	 */
	message reply;
	int r;

	memset(&reply, 0, sizeof(reply));
	reply.m_type = VBOX_REPLY;
	reply.VBOX_RESULT = result;
	reply.VBOX_CODE = code;
	reply.VBOX_ID = id;

	r = (IPC_STATUS_CALL(ipc_status) == SENDREC) ?
	    ipc_sendnb(endpt, &reply) :
	    asynsend3(endpt, &reply, AMF_NOREPLY);

	if (r != OK)
		printf("VBOX: unable to send reply to %d: %d\n", endpt, r);
}
95
96 /*===========================================================================*
97 * alloc_req *
98 *===========================================================================*/
alloc_req(int conn)99 static int alloc_req(int conn)
100 {
101 /* Allocate a request for the given connection. Allocate memory as
102 * necessary. Do not mark the request as busy, as it may end up not
103 * being used.
104 */
105 phys_bytes addr;
106 void *ptr;
107 int req;
108
109 for (req = 0; req < MAX_REQS; req++)
110 if (!hgcm_conn[conn].req[req].busy)
111 break;
112
113 if (req == MAX_REQS)
114 return EMFILE;
115
116 if (hgcm_conn[conn].req[req].ptr == NULL) {
117 if ((ptr = alloc_contig(VMMDEV_BUF_SIZE, 0, &addr)) == NULL)
118 return ENOMEM;
119
120 hgcm_conn[conn].req[req].ptr = (struct VMMDevHGCMHeader *) ptr;
121 hgcm_conn[conn].req[req].addr = addr;
122 }
123
124 return req;
125 }
126
127 /*===========================================================================*
128 * free_conn *
129 *===========================================================================*/
free_conn(int conn)130 static void free_conn(int conn)
131 {
132 /* Free the memory for all requests of the given connections, and mark
133 * the connection as free.
134 */
135 void *ptr;
136 int req;
137
138 for (req = 0; req < MAX_REQS; req++) {
139 if ((ptr = (void *) hgcm_conn[conn].req[req].ptr) != NULL) {
140 assert(!hgcm_conn[conn].req[req].busy);
141
142 free_contig(ptr, VMMDEV_BUF_SIZE);
143
144 hgcm_conn[conn].req[req].ptr = NULL;
145 }
146 }
147
148 hgcm_conn[conn].state = STATE_FREE;
149 }
150
151 /*===========================================================================*
152 * start_req *
153 *===========================================================================*/
static int start_req(int conn, int req, int type, size_t size, int ipc_status,
	long id, int *code)
{
	/* Issue a prepared request to the VMM device. The raw VirtualBox
	 * result is stored in 'code'; the converted POSIX result is returned,
	 * where EDONTREPLY means the request is executing asynchronously.
	 */
	struct VMMDevHGCMHeader *hdr;
	int r, res;

	hdr = hgcm_conn[conn].req[req].ptr;
	hdr->flags = 0;
	hdr->result = VMMDEV_ERR_GENERIC;

	res = vbox_request(&hdr->header, hgcm_conn[conn].req[req].addr, type,
	    size);
	*code = res;

	r = convert_result(res);

	if (r != OK && r != EDONTREPLY)
		return r;

	/* The request may be processed either immediately or asynchronously.
	 * The caller of this function must be able to cope with both
	 * situations. In either case, mark the current request as ongoing.
	 */
	hgcm_conn[conn].req[req].busy = TRUE;
	hgcm_conn[conn].req[req].status = ipc_status;
	hgcm_conn[conn].req[req].id = id;

	return r;
}
181
182 /*===========================================================================*
183 * cancel_req *
184 *===========================================================================*/
cancel_req(int conn,int req)185 static void cancel_req(int conn, int req)
186 {
187 /* Cancel an ongoing request. */
188
189 assert(hgcm_conn[conn].req[req].ptr != NULL);
190
191 /* The cancel request consists only of the HGCM header. The physical
192 * location determines the request to cancel. Note that request
193 * cancellation this is full of race conditions, so we simply ignore
194 * the return value and assumed all went well.
195 */
196 hgcm_conn[conn].req[req].ptr->flags = 0;
197 hgcm_conn[conn].req[req].ptr->result = VMMDEV_ERR_GENERIC;
198
199 vbox_request(&hgcm_conn[conn].req[req].ptr->header,
200 hgcm_conn[conn].req[req].addr, VMMDEV_REQ_HGCMCANCEL,
201 sizeof(struct VMMDevHGCMCancel));
202
203 hgcm_conn[conn].req[req].busy = FALSE;
204 }
205
206 /*===========================================================================*
207 * finish_req *
208 *===========================================================================*/
static int finish_req(int conn, int req, int *code)
{
	/* The given request has finished. Take the appropriate action,
	 * depending on the current connection state. Store the raw VirtualBox
	 * result code in 'code', and return the POSIX result for the caller
	 * (or, for a successful connect, the connection index).
	 */
	struct VMMDevHGCMConnect *connreq;
	struct VMMDevHGCMCall *callreq;
	struct VMMDevHGCMParam *inp;
	vbox_param_t *outp;
	int i, count, res, r = OK;

	hgcm_conn[conn].req[req].busy = FALSE;

	*code = res = hgcm_conn[conn].req[req].ptr->result;

	r = convert_result(res);

	/* The request has finished, so it cannot still be in progress. */
	if (r == EDONTREPLY)
		r = EGENERIC;

	switch (hgcm_conn[conn].state) {
	case STATE_FREE:
		/* A request can never complete on a free connection. */
		assert(0);

		break;

	case STATE_OPENING:
		/* A connect request completed. On success, save the client ID
		 * assigned by the device, and return the connection index as
		 * the caller's handle. On failure, release the connection.
		 */
		if (r == OK) {
			connreq = (struct VMMDevHGCMConnect *)
			    hgcm_conn[conn].req[req].ptr;
			hgcm_conn[conn].client_id = connreq->client_id;
			hgcm_conn[conn].state = STATE_OPEN;

			r = conn;
		} else {
			free_conn(conn);
		}

		break;

	case STATE_CLOSING:
		/* A disconnect request completed. */
		/* Neither we nor the caller can do anything with failures. */
		if (r != OK)
			printf("VBOX: disconnection failure #2 (%d)\n", res);

		free_conn(conn);

		r = OK;

		break;

	case STATE_OPEN:
		/* On success, extract and copy back parameters to the caller.
		 * Only U32 and U64 values need copying back here; pointer
		 * parameters were passed by physical page list, so the host
		 * presumably updated those buffers in place (see
		 * store_pages).
		 */
		if (r == OK) {
			callreq = (struct VMMDevHGCMCall *)
			    hgcm_conn[conn].req[req].ptr;
			inp = (struct VMMDevHGCMParam *) &callreq[1];
			outp = &hgcm_conn[conn].req[req].param[0];
			count = hgcm_conn[conn].req[req].count;

			for (i = 0; i < count; i++) {
				switch (outp->type) {
				case VBOX_TYPE_U32:
					outp->u32 = inp->u32;
					break;

				case VBOX_TYPE_U64:
					outp->u64 = inp->u64;
					break;

				default:
					break;
				}

				inp++;
				outp++;
			}

			/* Copy the updated parameter list back into the
			 * grant provided with the original call request.
			 */
			if (count > 0) {
				r = sys_safecopyto(hgcm_conn[conn].endpt,
				    hgcm_conn[conn].req[req].grant, 0,
				    (vir_bytes)
				    hgcm_conn[conn].req[req].param,
				    count * sizeof(vbox_param_t));
			}
		}

		break;
	}

	return r;
}
302
303 /*===========================================================================*
304 * check_conn *
305 *===========================================================================*/
check_conn(int conn)306 static void check_conn(int conn)
307 {
308 /* Check all requests for the given connection for completion. */
309 int r, req, code;
310
311 for (req = 0; req < MAX_REQS; req++) {
312 if (!hgcm_conn[conn].req[req].busy) continue;
313
314 if (!(hgcm_conn[conn].req[req].ptr->flags &
315 VMMDEV_HGCM_REQ_DONE))
316 continue;
317
318 r = finish_req(conn, req, &code);
319
320 assert(r != EDONTREPLY);
321
322 send_reply(hgcm_conn[conn].endpt,
323 hgcm_conn[conn].req[req].status, r, code,
324 hgcm_conn[conn].req[req].id);
325 }
326 }
327
328 /*===========================================================================*
329 * do_open *
330 *===========================================================================*/
static int do_open(message *m_ptr, int ipc_status, int *code)
{
	/* Process a connection request. On success, return the connection
	 * index for the caller; EDONTREPLY indicates that the request will
	 * complete asynchronously. 'code' receives the raw VirtualBox result.
	 */
	struct VMMDevHGCMConnect *connreq;
	int i, r, conn, count;

	if (m_ptr->VBOX_COUNT < 0 || m_ptr->VBOX_COUNT > VMMDEV_HGCM_NAME_SIZE)
		return EINVAL;

	/* Find a free connection slot. Make sure the sending endpoint is not
	 * already using up half of the connection slots. Count only slots
	 * that are actually in use: a freed slot retains the endpoint of its
	 * previous owner, which must not be held against that endpoint.
	 */
	conn = -1;
	count = 0;
	for (i = 0; i < MAX_CONNS; i++) {
		if (hgcm_conn[i].state == STATE_FREE) {
			if (conn < 0)
				conn = i;
		} else if (hgcm_conn[i].endpt == m_ptr->m_source)
			count++;
	}

	if (count >= MAX(MAX_CONNS / 2, 2))
		return EMFILE;

	if (conn < 0)
		return ENFILE;

	/* Initialize the connection and request structures. */
	hgcm_conn[conn].state = STATE_OPENING;
	hgcm_conn[conn].endpt = m_ptr->m_source;

	for (i = 0; i < MAX_REQS; i++) {
		hgcm_conn[conn].req[i].busy = FALSE;
		hgcm_conn[conn].req[i].ptr = NULL;
	}

	/* Set up and start the connection request. Release the slot again on
	 * failure, so that it does not remain stuck in the opening state.
	 */
	r = alloc_req(conn);

	if (r < 0) {
		free_conn(conn);

		return r;
	}
	assert(r == 0);	/* a fresh connection has no other requests */

	connreq = (struct VMMDevHGCMConnect *) hgcm_conn[conn].req[0].ptr;
	connreq->type = VMMDEV_HGCM_SVCLOC_LOCALHOST_EXISTING;
	if ((r = sys_safecopyfrom(m_ptr->m_source, m_ptr->VBOX_GRANT, 0,
	    (vir_bytes) connreq->name, m_ptr->VBOX_COUNT)) != OK) {
		free_conn(conn);

		return r;
	}
	/* Force null termination of the service name. */
	connreq->name[VMMDEV_HGCM_NAME_SIZE-1] = 0;

	r = start_req(conn, 0, VMMDEV_REQ_HGCMCONNECT, sizeof(*connreq),
	    ipc_status, m_ptr->VBOX_ID, code);

	if (r != OK && r != EDONTREPLY) {
		free_conn(conn);

		return r;
	}

	return (r == OK) ? finish_req(conn, 0, code) : r;
}
396
397 /*===========================================================================*
398 * do_close *
399 *===========================================================================*/
static int do_close(message *m_ptr, int ipc_status, int *code)
{
	/* Process a disconnection request. */
	struct VMMDevHGCMDisconnect *discreq;
	int conn, req, r;

	conn = m_ptr->VBOX_CONN;

	/* Validate the caller's connection handle. */
	if (conn < 0 || conn >= MAX_CONNS)
		return EINVAL;
	if (hgcm_conn[conn].state != STATE_OPEN ||
	    hgcm_conn[conn].endpt != m_ptr->m_source)
		return EINVAL;

	/* Cancel all requests that are still in flight. */
	for (req = 0; req < MAX_REQS; req++)
		if (hgcm_conn[conn].req[req].busy)
			cancel_req(conn, req);

	/* Request slot 0 was allocated when the connection was opened. */
	assert(hgcm_conn[conn].req[0].ptr != NULL);

	discreq = (struct VMMDevHGCMDisconnect *) hgcm_conn[conn].req[0].ptr;
	discreq->client_id = hgcm_conn[conn].client_id;

	r = start_req(conn, 0, VMMDEV_REQ_HGCMDISCONNECT, sizeof(*discreq),
	    ipc_status, m_ptr->VBOX_ID, code);

	if (r != OK && r != EDONTREPLY) {
		/* Neither we nor the caller can do anything with failures. */
		printf("VBOX: disconnection failure #1 (%d)\n", r);

		free_conn(conn);

		return OK;
	}

	hgcm_conn[conn].state = STATE_CLOSING;

	return (r == OK) ? finish_req(conn, 0, code) : r;
}
441
442 /*===========================================================================*
443 * store_pages *
444 *===========================================================================*/
static int store_pages(int conn, int req, vbox_param_t *inp, size_t *offp)
{
	/* Create a page list of physical pages that make up the provided
	 * buffer area. The page list is stored in the request buffer of the
	 * given connection and request, starting at byte offset *offp; on
	 * success, *offp is advanced past the stored page list.
	 */
	struct vumap_vir vvec;
	struct vumap_phys pvec[MAPVEC_NR];
	struct VMMDevHGCMPageList *pagelist;
	size_t offset, size, skip;
	int i, j, r, first, access, count, pages;

	/* Empty strings are allowed. */
	if (inp->ptr.size == 0)
		return OK;

	pagelist = (struct VMMDevHGCMPageList *)
	    (((u8_t *) hgcm_conn[conn].req[req].ptr) + *offp);

	/* Translate the parameter's transfer direction into page list flags.
	 */
	pagelist->flags = 0;
	if (inp->ptr.dir & VBOX_DIR_IN)
		pagelist->flags |= VMMDEV_HGCM_FLAG_FROM_HOST;
	if (inp->ptr.dir & VBOX_DIR_OUT)
		pagelist->flags |= VMMDEV_HGCM_FLAG_TO_HOST;
	pagelist->count = 0;

	/* Make sure there is room for the header (but no actual pages yet). */
	*offp += sizeof(*pagelist) - sizeof(pagelist->addr[0]);
	if (*offp > VMMDEV_BUF_SIZE)
		return ENOMEM;

	/* Host-to-guest (IN) transfers write into the buffer; guest-to-host
	 * (OUT) transfers read from it.
	 */
	access = 0;
	if (inp->ptr.dir & VBOX_DIR_IN) access |= VUA_WRITE;
	if (inp->ptr.dir & VBOX_DIR_OUT) access |= VUA_READ;

	offset = 0;
	first = TRUE;
	do {
		/* If the caller gives us a huge buffer, we might need multiple
		 * calls to sys_vumap(). Note that the caller currently has no
		 * reliable way to know whether such a buffer will fit in our
		 * request page. In the future, we may dynamically reallocate
		 * the request area to make more room as necessary; for now we
		 * just return an ENOMEM error in such cases.
		 */
		vvec.vv_grant = inp->ptr.grant;
		vvec.vv_size = inp->ptr.off + inp->ptr.size;
		count = MAPVEC_NR;
		if ((r = sys_vumap(hgcm_conn[conn].endpt, &vvec, 1,
		    inp->ptr.off + offset, access, pvec,
		    &count)) != OK)
			return r;

		/* First get the number of bytes processed, before (possibly)
		 * adjusting the size of the first element.
		 */
		for (i = size = 0; i < count; i++)
			size += pvec[i].vp_size;

		/* VirtualBox wants aligned page addresses only, and an offset
		 * into the first page. All other pages except the last are
		 * full pages, and the last page is cut off using the size.
		 */
		skip = 0;
		if (first) {
			skip = pvec[0].vp_addr & (PAGE_SIZE - 1);
			pvec[0].vp_addr -= skip;
			pvec[0].vp_size += skip;
			pagelist->offset = skip;
			first = FALSE;
		}

		/* How many pages were mapped? */
		pages = (skip + size + PAGE_SIZE - 1) / PAGE_SIZE;

		/* Make sure there is room to store this many extra pages. */
		*offp += sizeof(pagelist->addr[0]) * pages;
		if (*offp > VMMDEV_BUF_SIZE)
			return ENOMEM;

		/* Actually store the pages in the page list. A physical range
		 * larger than one page is split into page-sized entries.
		 */
		for (i = j = 0; i < pages; i++) {
			assert(!(pvec[j].vp_addr & (PAGE_SIZE - 1)));

			pagelist->addr[pagelist->count++] =
			    ((u64_t)(pvec[j].vp_addr));

			if (pvec[j].vp_size > PAGE_SIZE) {
				pvec[j].vp_addr += PAGE_SIZE;
				pvec[j].vp_size -= PAGE_SIZE;
			}
			else j++;
		}
		assert(j == count);

		offset += size;
	} while (offset < inp->ptr.size);

	assert(offset == inp->ptr.size);

	return OK;
}
546
547 /*===========================================================================*
548 * do_call *
549 *===========================================================================*/
static int do_call(message *m_ptr, int ipc_status, int *code)
{
	/* Perform a HGCM call. The parameters are copied in from the caller,
	 * converted to VMMDev format, and passed to the device; EDONTREPLY
	 * indicates that the call will complete asynchronously.
	 */
	vbox_param_t *inp;
	struct VMMDevHGCMParam *outp;
	struct VMMDevHGCMCall *callreq;
	size_t size;
	int i, r, conn, req, count;

	conn = m_ptr->VBOX_CONN;
	count = m_ptr->VBOX_COUNT;

	/* Sanity checks. */
	if (conn < 0 || conn >= MAX_CONNS)
		return EINVAL;
	if (hgcm_conn[conn].endpt != m_ptr->m_source ||
	    hgcm_conn[conn].state != STATE_OPEN)
		return EINVAL;
	/* The caller-supplied parameter count must be bounded, or the copy
	 * below would overflow the local parameter array.
	 */
	if (count < 0 || count > MAX_PARAMS)
		return EINVAL;

	/* Allocate a request, and copy in the parameters. */
	req = alloc_req(conn);

	if (req < 0)
		return req;

	hgcm_conn[conn].req[req].grant = m_ptr->VBOX_GRANT;
	hgcm_conn[conn].req[req].count = count;

	if (count > 0) {
		if ((r = sys_safecopyfrom(m_ptr->m_source, m_ptr->VBOX_GRANT,
		    0, (vir_bytes) hgcm_conn[conn].req[req].param,
		    count * sizeof(vbox_param_t))) != OK)
			return r;
	}

	/* Set up the basic request. */
	callreq = (struct VMMDevHGCMCall *) hgcm_conn[conn].req[req].ptr;
	callreq->client_id = hgcm_conn[conn].client_id;
	callreq->function = m_ptr->VBOX_FUNCTION;
	callreq->count = count;

	/* Rewrite and convert the parameters. */
	inp = &hgcm_conn[conn].req[req].param[0];
	outp = (struct VMMDevHGCMParam *) &callreq[1];

	size = sizeof(*callreq) + sizeof(*outp) * count;
	assert(size < VMMDEV_BUF_SIZE);

	for (i = 0; i < count; i++) {
		switch (inp->type) {
		case VBOX_TYPE_U32:
			outp->type = VMMDEV_HGCM_PARAM_U32;
			outp->u32 = inp->u32;
			break;

		case VBOX_TYPE_U64:
			outp->type = VMMDEV_HGCM_PARAM_U64;
			outp->u64 = inp->u64;
			break;

		case VBOX_TYPE_PTR:
			/* Buffers are passed as physical page lists, which
			 * are appended after the parameter area.
			 */
			outp->type = VMMDEV_HGCM_PARAM_PAGELIST;
			outp->pagelist.offset = size;
			outp->pagelist.size = inp->ptr.size;

			if ((r = store_pages(conn, req, inp, &size)) != OK)
				return r;

			break;

		default:
			return EINVAL;
		}

		inp++;
		outp++;
	}

	/* Start the request. */
	r = start_req(conn, req, VMMDEV_REQ_HGCMCALL, size, ipc_status,
	    m_ptr->VBOX_ID, code);

	if (r != OK && r != EDONTREPLY)
		return r;

	return (r == OK) ? finish_req(conn, req, code) : r;
}
637
638 /*===========================================================================*
639 * do_cancel *
640 *===========================================================================*/
static int do_cancel(message *m_ptr, int ipc_status)
{
	/* Cancel an ongoing call. */
	int conn, req;

	conn = m_ptr->VBOX_CONN;

	/* Sanity checks. Note that connection and disconnection requests
	 * cannot be cancelled, so only open connections qualify.
	 */
	if (conn < 0 || conn >= MAX_CONNS)
		return EINVAL;
	if (hgcm_conn[conn].state != STATE_OPEN ||
	    hgcm_conn[conn].endpt != m_ptr->m_source)
		return EINVAL;

	/* Look up the ongoing request by its caller-given identifier. */
	req = 0;
	while (req < MAX_REQS && !(hgcm_conn[conn].req[req].busy &&
	    hgcm_conn[conn].req[req].id == m_ptr->VBOX_ID))
		req++;

	/* If no such request was ongoing, then our behavior depends on the
	 * way the request was made: we do not want to send two asynchronous
	 * replies for one request, but if the caller used SENDREC, we have to
	 * reply with something or the caller would deadlock.
	 */
	if (req == MAX_REQS)
		return (IPC_STATUS_CALL(ipc_status) == SENDREC) ?
		    EINVAL : EDONTREPLY;

	/* Actually cancel the request, and send a reply. */
	cancel_req(conn, req);

	return EINTR;
}
681
682 /*===========================================================================*
683 * hgcm_message *
684 *===========================================================================*/
void hgcm_message(message *m_ptr, int ipc_status)
{
	/* Dispatch an incoming request message to the appropriate handler,
	 * and reply unless the request is still in progress.
	 */
	int r, code = VMMDEV_ERR_GENERIC;

	switch (m_ptr->m_type) {
	case VBOX_OPEN:
		r = do_open(m_ptr, ipc_status, &code);
		break;
	case VBOX_CLOSE:
		r = do_close(m_ptr, ipc_status, &code);
		break;
	case VBOX_CALL:
		r = do_call(m_ptr, ipc_status, &code);
		break;
	case VBOX_CANCEL:
		r = do_cancel(m_ptr, ipc_status);
		break;
	default:
		r = ENOSYS;
		break;
	}

	/* EDONTREPLY means the reply is sent upon request completion. */
	if (r != EDONTREPLY)
		send_reply(m_ptr->m_source, ipc_status, r, code,
		    m_ptr->VBOX_ID);
}
702
703 /*===========================================================================*
704 * hgcm_intr *
705 *===========================================================================*/
hgcm_intr(void)706 void hgcm_intr(void)
707 {
708 /* We received an HGCM event. Check ongoing requests for completion. */
709 int conn;
710
711 for (conn = 0; conn < MAX_CONNS; conn++)
712 if (hgcm_conn[conn].state != STATE_FREE)
713 check_conn(conn);
714 }
715