#include "fs.h"
#include <minix/vfsif.h>
#include <assert.h>
#include <string.h>

static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp);
static int queuemsg(struct vmnt *vmp);

/*===========================================================================*
 *				sendmsg					     *
 *===========================================================================*/
static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp)
{
/* This is the low level function that sends requests.
 * Currently to FSes or VM.
 */
  int r, transid;

  if (vmp) vmp->m_comm.c_cur_reqs++;	/* One more request awaiting a reply */
  transid = wp->w_tid + VFS_TRANSID;
  wp->w_sendrec->m_type = TRNS_ADD_ID(wp->w_sendrec->m_type, transid);
  wp->w_task = dst;
  if ((r = asynsend3(dst, wp->w_sendrec, AMF_NOREPLY)) != OK) {
	printf("VFS: sendmsg: error sending message. "
		"dest: %d req_nr: %d err: %d\n", dst,
		wp->w_sendrec->m_type, r);
	util_stacktrace();
	return(r);
  }

  return(r);
}

/*===========================================================================*
 *				send_work				     *
 *===========================================================================*/
void send_work(void)
{
/* Try to send out as many requests as possible */
  struct vmnt *vmp;

  if (sending == 0) return;
  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++)
	fs_sendmore(vmp);
}

/*===========================================================================*
 *				fs_cancel				     *
 *===========================================================================*/
void fs_cancel(struct vmnt *vmp)
{
/* Cancel all pending requests for this vmp */
  struct worker_thread *worker;

  while ((worker = vmp->m_comm.c_req_queue) != NULL) {
	vmp->m_comm.c_req_queue = worker->w_next;
	worker->w_next = NULL;
	sending--;
	worker_stop(worker);
  }
}

/*===========================================================================*
 *				fs_sendmore				     *
 *===========================================================================*/
void fs_sendmore(struct vmnt *vmp)
{
  struct worker_thread *worker;

  /* Can we send more requests? */
  if (vmp->m_fs_e == NONE) return;
  if ((worker = vmp->m_comm.c_req_queue) == NULL)	/* No process is queued */
	return;
  if (vmp->m_comm.c_cur_reqs >= vmp->m_comm.c_max_reqs)	/* No room to send more */
	return;
  if (vmp->m_flags & VMNT_CALLBACK)	/* Hold off for now */
	return;

  vmp->m_comm.c_req_queue = worker->w_next;	/* Remove head */
  worker->w_next = NULL;
  sending--;
  assert(sending >= 0);
  (void) sendmsg(vmp, vmp->m_fs_e, worker);
}
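
/* Informal sketch, not part of the original code: sendmsg() above stamps each
 * outgoing request with a per-worker transaction ID (w_tid + VFS_TRANSID) via
 * TRNS_ADD_ID().  The reply side lives outside this file, but it is expected
 * to recover the worker from the reply's m_type roughly as follows, assuming
 * the TRNS_GET_ID/TRNS_DEL_ID/IS_VFS_FS_TRANSID macros from <minix/vfsif.h>
 * and a worker lookup helper:
 *
 *	transid = TRNS_GET_ID(m.m_type);
 *	if (IS_VFS_FS_TRANSID(transid)) {
 *		m.m_type = TRNS_DEL_ID(m.m_type);	// strip the ID again
 *		wp = worker_get(transid - VFS_TRANSID);	// hypothetical lookup
 *	}
 */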

/*===========================================================================*
 *				drv_sendrec				     *
 *===========================================================================*/
int drv_sendrec(endpoint_t drv_e, message *reqmp)
{
	int r;
	struct dmap *dp;

	/* For the CTTY_MAJOR case, we would actually have to lock the device
	 * entry being redirected to. However, the CTTY major only hosts a
	 * character device while this function is used only for block devices.
	 * Thus, we can simply deny the request immediately.
	 */
	if (drv_e == CTTY_ENDPT) {
		printf("VFS: /dev/tty is not a block device!\n");
		return EIO;
	}

	if ((dp = get_dmap(drv_e)) == NULL)
		panic("driver endpoint %d invalid", drv_e);

	lock_dmap(dp);
	if (dp->dmap_servicing != INVALID_THREAD)
		panic("driver locking inconsistency");
	dp->dmap_servicing = self->w_tid;
	self->w_task = drv_e;
	self->w_drv_sendrec = reqmp;

	if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
		/* Yield execution until we've received the reply */
		worker_wait();
	} else {
		printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
			drv_e, r);
		self->w_drv_sendrec = NULL;
	}

	assert(self->w_drv_sendrec == NULL);
	dp->dmap_servicing = INVALID_THREAD;
	self->w_task = NONE;
	unlock_dmap(dp);
	return(r);
}

/*===========================================================================*
 *				fs_sendrec				     *
 *===========================================================================*/
int fs_sendrec(endpoint_t fs_e, message *reqmp)
{
  struct vmnt *vmp;
  int r;

  if ((vmp = find_vmnt(fs_e)) == NULL) {
	printf("Trying to talk to non-existent FS endpoint %d\n", fs_e);
	return(EIO);
  }
  if (fs_e == fp->fp_endpoint) return(EDEADLK);

  assert(self->w_sendrec == NULL);
  self->w_sendrec = reqmp;	/* Where to store request and reply */

  /* Find out whether we can send right away or have to enqueue */
  if (	!(vmp->m_flags & VMNT_CALLBACK) &&
	vmp->m_comm.c_cur_reqs < vmp->m_comm.c_max_reqs) {
	/* There's still room to send more and no proc is queued */
	r = sendmsg(vmp, vmp->m_fs_e, self);
  } else {
	r = queuemsg(vmp);
  }
  self->w_next = NULL;	/* End of list */

  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  assert(self->w_sendrec == NULL);

  return(reqmp->m_type);
}

/*===========================================================================*
 *				vm_sendrec				     *
 *===========================================================================*/
int vm_sendrec(message *reqmp)
{
  int r;

  assert(self);
  assert(reqmp);

  assert(self->w_sendrec == NULL);
  self->w_sendrec = reqmp;	/* Where to store request and reply */

  r = sendmsg(NULL, VM_PROC_NR, self);

  self->w_next = NULL;	/* End of list */

  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  assert(self->w_sendrec == NULL);

  return(reqmp->m_type);
}
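
/* Usage sketch, not part of the original code: fs_sendrec() is the
 * synchronous path for talking to a mounted file server.  A caller fills in
 * a request message, sends it, and receives the reply in the same buffer;
 * the return value is the reply's m_type (OK or an error code).  REQ_EXAMPLE
 * below is a placeholder for a real REQ_* request code from <minix/vfsif.h>:
 *
 *	message m;
 *	int r;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.m_type = REQ_EXAMPLE;		// hypothetical request code
 *	// ... fill in request-specific fields ...
 *	r = fs_sendrec(vmp->m_fs_e, &m);
 *	if (r != OK) return r;		// send error or FS reply status
 *	// ... read reply fields from m ...
 */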

/*===========================================================================*
 *			vm_vfs_procctl_handlemem			     *
 *===========================================================================*/
int vm_vfs_procctl_handlemem(endpoint_t ep,
	vir_bytes mem, vir_bytes len, int flags)
{
	message m;

	/* main thread can not be suspended */
	if (!self) return EFAULT;

	memset(&m, 0, sizeof(m));

	m.m_type = VM_PROCCTL;
	m.VMPCTL_WHO = ep;
	m.VMPCTL_PARAM = VMPPARAM_HANDLEMEM;
	m.VMPCTL_M1 = mem;
	m.VMPCTL_LEN = len;
	m.VMPCTL_FLAGS = flags;

	return vm_sendrec(&m);
}

/*===========================================================================*
 *				queuemsg				     *
 *===========================================================================*/
static int queuemsg(struct vmnt *vmp)
{
/* Put request on queue for vmnt */

  struct worker_thread *queue;

  if (vmp->m_comm.c_req_queue == NULL) {
	vmp->m_comm.c_req_queue = self;
  } else {
	/* Walk the list ... */
	queue = vmp->m_comm.c_req_queue;
	while (queue->w_next != NULL) queue = queue->w_next;

	/* ... and append this worker */
	queue->w_next = self;
  }

  self->w_next = NULL;	/* End of list */
  sending++;

  return(OK);
}
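
/* Flow sketch, not part of the original code: when there is no room to send
 * (c_cur_reqs has reached c_max_reqs) or a callback is in progress,
 * fs_sendrec() parks the calling worker on the vmnt's request queue via
 * queuemsg() and blocks in worker_wait().  Once a reply frees a request
 * slot, the main loop is expected to call send_work(), which lets
 * fs_sendmore() pop the queue head and hand it to sendmsg(); the reply path
 * then wakes the worker that blocked in worker_wait():
 *
 *	queuemsg(vmp)  ->  worker_wait()
 *	    ... reply arrives elsewhere, c_cur_reqs is decremented ...
 *	send_work()  ->  fs_sendmore(vmp)  ->  sendmsg(vmp, vmp->m_fs_e, worker)
 */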