1 #include "fs.h" 2 #include <minix/vfsif.h> 3 #include <assert.h> 4 #include <string.h> 5 6 static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp); 7 static int queuemsg(struct vmnt *vmp); 8 9 /*===========================================================================* 10 * sendmsg * 11 *===========================================================================*/ 12 static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp) 13 { 14 /* This is the low level function that sends requests. 15 * Currently to FSes or VM. 16 */ 17 int r, transid; 18 19 if(vmp) vmp->m_comm.c_cur_reqs++; /* One more request awaiting a reply */ 20 transid = wp->w_tid + VFS_TRANSID; 21 wp->w_sendrec->m_type = TRNS_ADD_ID(wp->w_sendrec->m_type, transid); 22 wp->w_task = dst; 23 if ((r = asynsend3(dst, wp->w_sendrec, AMF_NOREPLY)) != OK) { 24 printf("VFS: sendmsg: error sending message. " 25 "dest: %d req_nr: %d err: %d\n", dst, 26 wp->w_sendrec->m_type, r); 27 util_stacktrace(); 28 return(r); 29 } 30 31 return(r); 32 } 33 34 /*===========================================================================* 35 * send_work * 36 *===========================================================================*/ 37 void send_work(void) 38 { 39 /* Try to send out as many requests as possible */ 40 struct vmnt *vmp; 41 42 if (sending == 0) return; 43 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++) 44 fs_sendmore(vmp); 45 } 46 47 /*===========================================================================* 48 * fs_cancel * 49 *===========================================================================*/ 50 void fs_cancel(struct vmnt *vmp) 51 { 52 /* Cancel all pending requests for this vmp */ 53 struct worker_thread *worker; 54 55 while ((worker = vmp->m_comm.c_req_queue) != NULL) { 56 vmp->m_comm.c_req_queue = worker->w_next; 57 worker->w_next = NULL; 58 sending--; 59 worker_stop(worker); 60 } 61 } 62 63 /*===========================================================================* 64 * fs_sendmore * 65 *===========================================================================*/ 66 void fs_sendmore(struct vmnt *vmp) 67 { 68 struct worker_thread *worker; 69 70 /* Can we send more requests? */ 71 if (vmp->m_fs_e == NONE) return; 72 if ((worker = vmp->m_comm.c_req_queue) == NULL) /* No process is queued */ 73 return; 74 if (vmp->m_comm.c_cur_reqs >= vmp->m_comm.c_max_reqs)/*No room to send more*/ 75 return; 76 if (vmp->m_flags & VMNT_CALLBACK) /* Hold off for now */ 77 return; 78 79 vmp->m_comm.c_req_queue = worker->w_next; /* Remove head */ 80 worker->w_next = NULL; 81 sending--; 82 assert(sending >= 0); 83 (void) sendmsg(vmp, vmp->m_fs_e, worker); 84 } 85 86 /*===========================================================================* 87 * drv_sendrec * 88 *===========================================================================*/ 89 int drv_sendrec(endpoint_t drv_e, message *reqmp) 90 { 91 int r; 92 struct dmap *dp; 93 94 /* For the CTTY_MAJOR case, we would actually have to lock the device 95 * entry being redirected to. However, the CTTY major only hosts a 96 * character device while this function is used only for block devices. 97 * Thus, we can simply deny the request immediately. 
98 */ 99 if (drv_e == CTTY_ENDPT) { 100 printf("VFS: /dev/tty is not a block device!\n"); 101 return EIO; 102 } 103 104 if ((dp = get_dmap_by_endpt(drv_e)) == NULL) 105 panic("driver endpoint %d invalid", drv_e); 106 107 lock_dmap(dp); 108 if (dp->dmap_servicing != INVALID_THREAD) 109 panic("driver locking inconsistency"); 110 dp->dmap_servicing = self->w_tid; 111 self->w_task = drv_e; 112 self->w_drv_sendrec = reqmp; 113 114 if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) { 115 /* Yield execution until we've received the reply */ 116 worker_wait(); 117 118 } else { 119 printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n", 120 drv_e, r); 121 self->w_drv_sendrec = NULL; 122 } 123 124 assert(self->w_drv_sendrec == NULL); 125 dp->dmap_servicing = INVALID_THREAD; 126 self->w_task = NONE; 127 unlock_dmap(dp); 128 return(r); 129 } 130 131 /*===========================================================================* 132 * fs_sendrec * 133 *===========================================================================*/ 134 int fs_sendrec(endpoint_t fs_e, message *reqmp) 135 { 136 struct vmnt *vmp; 137 int r; 138 139 if ((vmp = find_vmnt(fs_e)) == NULL) { 140 printf("Trying to talk to non-existent FS endpoint %d\n", fs_e); 141 return(EIO); 142 } 143 if (fs_e == fp->fp_endpoint) return(EDEADLK); 144 145 assert(self->w_sendrec == NULL); 146 self->w_sendrec = reqmp; /* Where to store request and reply */ 147 148 /* Find out whether we can send right away or have to enqueue */ 149 if ( !(vmp->m_flags & VMNT_CALLBACK) && 150 vmp->m_comm.c_cur_reqs < vmp->m_comm.c_max_reqs) { 151 /* There's still room to send more and no proc is queued */ 152 r = sendmsg(vmp, vmp->m_fs_e, self); 153 } else { 154 r = queuemsg(vmp); 155 } 156 self->w_next = NULL; /* End of list */ 157 158 if (r != OK) return(r); 159 160 worker_wait(); /* Yield execution until we've received the reply. */ 161 162 assert(self->w_sendrec == NULL); 163 164 r = reqmp->m_type; 165 if (r == ERESTART) /* ERESTART is used internally, so make sure it is.. */ 166 r = EIO; /* ..not delivered as a result from a file system. */ 167 return(r); 168 } 169 170 /*===========================================================================* 171 * vm_sendrec * 172 *===========================================================================*/ 173 int vm_sendrec(message *reqmp) 174 { 175 int r; 176 177 assert(self); 178 assert(reqmp); 179 180 assert(self->w_sendrec == NULL); 181 self->w_sendrec = reqmp; /* Where to store request and reply */ 182 183 r = sendmsg(NULL, VM_PROC_NR, self); 184 185 self->w_next = NULL; /* End of list */ 186 187 if (r != OK) return(r); 188 189 worker_wait(); /* Yield execution until we've received the reply. 
*/ 190 191 assert(self->w_sendrec == NULL); 192 193 return(reqmp->m_type); 194 } 195 196 197 /*===========================================================================* 198 * vm_vfs_procctl_handlemem * 199 *===========================================================================*/ 200 int vm_vfs_procctl_handlemem(endpoint_t ep, 201 vir_bytes mem, vir_bytes len, int flags) 202 { 203 message m; 204 205 /* main thread can not be suspended */ 206 if(!self) return EFAULT; 207 208 memset(&m, 0, sizeof(m)); 209 210 m.m_type = VM_PROCCTL; 211 m.VMPCTL_WHO = ep; 212 m.VMPCTL_PARAM = VMPPARAM_HANDLEMEM; 213 m.VMPCTL_M1 = mem; 214 m.VMPCTL_LEN = len; 215 m.VMPCTL_FLAGS = flags; 216 217 return vm_sendrec(&m); 218 } 219 220 /*===========================================================================* 221 * queuemsg * 222 *===========================================================================*/ 223 static int queuemsg(struct vmnt *vmp) 224 { 225 /* Put request on queue for vmnt */ 226 227 struct worker_thread *queue; 228 229 if (vmp->m_comm.c_req_queue == NULL) { 230 vmp->m_comm.c_req_queue = self; 231 } else { 232 /* Walk the list ... */ 233 queue = vmp->m_comm.c_req_queue; 234 while (queue->w_next != NULL) queue = queue->w_next; 235 236 /* ... and append this worker */ 237 queue->w_next = self; 238 } 239 240 self->w_next = NULL; /* End of list */ 241 sending++; 242 243 return(OK); 244 } 245
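
/*
 * Usage sketch (illustration only, not part of the original file): a worker
 * thread sends a request to the FS process backing a vmnt and blocks in
 * fs_sendrec() until the reply arrives.  REQ_EXAMPLE below is a placeholder
 * request code; real callers use the REQ_* codes from <minix/vfsif.h>.
 *
 *	message m;
 *	int r;
 *
 *	memset(&m, 0, sizeof(m));
 *	m.m_type = REQ_EXAMPLE;			(hypothetical request code)
 *	r = fs_sendrec(vmp->m_fs_e, &m);	(send or queue, then yield)
 *
 * On return, m holds the reply and r is the reply's m_type, with ERESTART
 * mapped to EIO so it is never delivered as a file system result.
 */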