/*-
 * Copyright (c) 2019 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2019 The DragonFly Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * FUSE request/reply IPC between the kernel and the userspace FUSE
 * server: allocation of request/reply buffers, queueing of in-flight
 * requests on the mount's request/reply lists, and the sleep/wakeup
 * protocol used to wait for the server to read a request and post a
 * reply.
 */

#include "fuse.h"

#include <sys/signalvar.h>
#include <sys/kern_syscall.h>

static MALLOC_DEFINE(M_FUSE_BUF, "fuse_buf", "FUSE buf");
static MALLOC_DEFINE(M_FUSE_IPC, "fuse_ipc", "FUSE ipc");

/* Object cache backing struct fuse_ipc allocations (see fuse_ipc_init()). */
static struct objcache *fuse_ipc_objcache = NULL;
static struct objcache_malloc_args fuse_ipc_args = {
	sizeof(struct fuse_ipc), M_FUSE_IPC,
};

#if 0
/*
 * Currently unused helpers to mask every signal except SIGKILL around a
 * blocking FUSE transaction, and to restore the previous mask afterwards.
 * Both return -1 when there is no current process context.
 */
static int
fuse_block_sigs(sigset_t *oldset)
{
	if (curproc) {
		sigset_t newset;
		int error;

		SIGFILLSET(newset);
		SIGDELSET(newset, SIGKILL);

		error = kern_sigprocmask(SIG_BLOCK, &newset, oldset);
		KKASSERT(!error);
		return error;
	}

	return -1;
}

static int
fuse_restore_sigs(sigset_t *oldset)
{
	if (curproc) {
		int error = kern_sigprocmask(SIG_SETMASK, oldset, NULL);
		KKASSERT(!error);
		return error;
	}

	return -1;
}
#endif

/*
 * Allocate a zeroed data buffer of len bytes for fbp.
 * M_WAITOK guarantees the allocation succeeds (the KKASSERT is belt and
 * suspenders).
 */
void
fuse_buf_alloc(struct fuse_buf *fbp, size_t len)
{
	fbp->buf = kmalloc(len, M_FUSE_BUF, M_WAITOK | M_ZERO);
	KKASSERT(fbp->buf);
	fbp->len = len;
}

/*
 * Free fbp's data buffer, if any, and reset it to the empty state.
 * Safe to call on an already-freed/never-allocated buffer.
 */
void
fuse_buf_free(struct fuse_buf *fbp)
{
	if (fbp->buf) {
		kfree(fbp->buf, M_FUSE_BUF);
		fbp->buf = NULL;
	}
	fbp->len = 0;
}

/*
 * Allocate and initialize an IPC transaction for mount fmp, with a
 * request buffer large enough for a fuse_in_header plus len bytes of
 * opcode-specific payload.  The caller owns the single initial
 * reference (drop with fuse_ipc_put()).  The reply buffer is allocated
 * later, by whoever receives the reply.
 */
struct fuse_ipc*
fuse_ipc_get(struct fuse_mount *fmp, size_t len)
{
	struct fuse_ipc *fip;

	fip = objcache_get(fuse_ipc_objcache, M_WAITOK);
	bzero(fip, sizeof(*fip));
	refcount_init(&fip->refcnt, 1);
	fip->fmp = fmp;
	/* Per-mount monotonically increasing unique id for this request. */
	fip->unique = atomic_fetchadd_long(&fmp->unique, 1);
	fip->done = 0;

	fuse_buf_alloc(&fip->request, sizeof(struct fuse_in_header) + len);
	fip->reply.buf = NULL;

	return fip;
}

/*
 * Drop a reference on fip; on the last release free both the request
 * and reply buffers and return the structure to the object cache.
 */
void
fuse_ipc_put(struct fuse_ipc *fip)
{
	if (refcount_release(&fip->refcnt)) {
		fuse_buf_free(&fip->request);
		fuse_buf_free(&fip->reply);
		objcache_put(fuse_ipc_objcache, fip);
	}
}

/*
 * Unlink fip from the mount's request and/or reply queues, whichever it
 * is still on (used on timeout/error paths to abort a transaction).
 * If fip was still unsent and a thread is parked in fuse_ipc_wait_sent()
 * (it advertises that by swapping fip->sent to -1), setting sent to 1
 * here and waking fip releases that waiter.
 */
static void
fuse_ipc_remove(struct fuse_ipc *fip)
{
	struct fuse_mount *fmp = fip->fmp;
	struct fuse_ipc *p;

	mtx_lock(&fmp->ipc_lock);
	TAILQ_FOREACH(p, &fmp->request_head, request_entry) {
		if (fip == p) {
			TAILQ_REMOVE(&fmp->request_head, p, request_entry);
			if (atomic_swap_int(&fip->sent, 1) == -1)
				wakeup(fip);
			break;
		}
	}
	TAILQ_FOREACH(p, &fmp->reply_head, reply_entry) {
		if (fip == p) {
			TAILQ_REMOVE(&fmp->reply_head, p, reply_entry);
			break;
		}
	}
	mtx_unlock(&fmp->ipc_lock);
}

/*
 * Fill in the fuse_in_header of fip's request buffer for opcode op on
 * inode ino, stamping it with fip's unique id and the caller's
 * credentials (cred, or the current thread's ucred when NULL) and pid
 * (0 when there is no process context).
 * Returns a pointer to the opcode-specific payload area following the
 * header, for the caller to fill in.
 */
void*
fuse_ipc_fill(struct fuse_ipc *fip, int op, uint64_t ino, struct ucred *cred)
{
	if (!cred)
		cred = curthread->td_ucred;

	fuse_fill_in_header(fuse_in(fip), fuse_in_size(fip), op, fip->unique,
	    ino, cred->cr_uid, cred->cr_rgid,
	    curthread->td_proc ? curthread->td_proc->p_pid : 0);

	fuse_dbgipc(fip, 0, "");

	return fuse_in_data(fip);
}

/*
 * Wait until a reply for fip has been posted (presumably by the
 * /dev/fuse write path, which is not visible in this file — it is
 * expected to mark fip replied and wakeup(fip)).
 *
 * The tsleep_interlock()/PINTERLOCKED pairing with a re-check of the
 * replied flag in between closes the race where the reply arrives after
 * the check but before the sleep.  Sleeps are bounded at 5 seconds and
 * retried up to 6 times (~30s) before the transaction is aborted with
 * ETIMEDOUT.  On every error return the ipc is removed from the queues
 * and marked replied so that no later wakeup touches a recycled
 * structure.  Returns 0 on success, ENOTCONN when the mount died,
 * ETIMEDOUT on timeout, or the tsleep() error (e.g. an interrupt).
 */
static int
fuse_ipc_wait(struct fuse_ipc *fip)
{
	int error, retry = 0;

	if (fuse_test_dead(fip->fmp)) {
		KKASSERT(!fuse_ipc_test_replied(fip));
		fuse_ipc_set_replied(fip);
		return ENOTCONN;
	}

	if (fuse_ipc_test_replied(fip))
		return 0;
again:
	tsleep_interlock(fip, 0);
	/* Re-check after interlock so a just-posted reply isn't missed. */
	if (fuse_ipc_test_replied(fip))
		return 0;
	error = tsleep(fip, PINTERLOCKED, "ftxp", 5 * hz);
	if (!error)
		KKASSERT(fuse_ipc_test_replied(fip));

	if (error == EWOULDBLOCK) {
		if (!fuse_ipc_test_replied(fip)) {
			if (!retry)
				fuse_print("timeout/retry\n");
			if (retry++ < 6)
				goto again;
			fuse_print("timeout\n");
			fuse_ipc_remove(fip);
			fuse_ipc_set_replied(fip);
			return ETIMEDOUT;
		} else
			/* Reply raced in during the timeout; treat as done. */
			fuse_dbg("EWOULDBLOCK lost race\n");
	} else if (error) {
		fuse_print("error=%d\n", error);
		fuse_ipc_remove(fip);
		fuse_ipc_set_replied(fip);
		return error;
	}

	if (fuse_test_dead(fip->fmp)) {
		KKASSERT(fuse_ipc_test_replied(fip));
		return ENOTCONN;
	}

	return 0;
}

/*
 * Wait until fip's request has been handed to the userspace server
 * (fip->sent becomes 1 — presumably set by the /dev/fuse read path,
 * not visible in this file), for transactions that expect no reply.
 *
 * Swapping fip->sent to -1 both tests the flag and advertises that a
 * waiter is parked here, so the sender side knows to wakeup(fip) (see
 * fuse_ipc_remove() for the matching swap-to-1).  Bounded at 6 sleeps
 * of 5 seconds (~30s) before giving up with ETIMEDOUT.  On any failure
 * (timeout or dead mount) the ipc is dequeued and marked replied.
 */
static int
fuse_ipc_wait_sent(struct fuse_ipc *fip)
{
	int error, retry = 0;

	if (fuse_test_dead(fip->fmp)) {
		KKASSERT(!fuse_ipc_test_replied(fip));
		fuse_ipc_remove(fip);
		fuse_ipc_set_replied(fip);
		return ENOTCONN;
	}

	error = 0;

	for (;;) {
		tsleep_interlock(fip, 0);
		if (atomic_swap_int(&fip->sent, -1) == 1) {
			error = 0;
			break;
		}
		error = tsleep(fip, PINTERLOCKED, "ftxp", 5 * hz);
		if (error == EWOULDBLOCK) {
			++retry;
			if (retry == 6) {
				fuse_print("timeout\n");
				error = ETIMEDOUT;
				break;
			}
			fuse_print("timeout/retry\n");
		}
	}
	if (fuse_test_dead(fip->fmp))
		error = ENOTCONN;
	if (error) {
		fuse_ipc_remove(fip);
		fuse_ipc_set_replied(fip);
	}
	return error;
}

/*
 * Send the request in fip to the userspace server and wait for its
 * reply.  Consumes the caller's reference to fip on every error path;
 * on success (return 0) the caller still owns fip — and its reply
 * buffer — and must fuse_ipc_put() it when done.
 *
 * The ipc is queued on both the request list (for the server to read)
 * and the reply list (for the reply to be matched by unique id), then
 * any thread sleeping on fmp (the /dev/fuse reader, presumably) is
 * woken and kqueue watchers are notified.  A negative errno in the
 * reply header (FUSE convention) is normalized to a positive return.
 */
int
fuse_ipc_tx(struct fuse_ipc *fip)
{
	struct fuse_mount *fmp = fip->fmp;
	struct fuse_out_header *ohd;
	int error;

	if (fuse_test_dead(fmp)) {
		fuse_ipc_put(fip);
		return ENOTCONN;
	}

	mtx_lock(&fmp->ipc_lock);
	TAILQ_INSERT_TAIL(&fmp->reply_head, fip, reply_entry);
	TAILQ_INSERT_TAIL(&fmp->request_head, fip, request_entry);
	mtx_unlock(&fmp->ipc_lock);

	wakeup(fmp);
	KNOTE(&fmp->kq.ki_note, 0);

	error = fuse_ipc_wait(fip);
	KKASSERT(fuse_ipc_test_replied(fip));
	if (error) {
		fuse_dbgipc(fip, error, "ipc_wait");
		fuse_ipc_put(fip);
		return error;
	}

	ohd = fuse_out(fip);
	KKASSERT(ohd);
	error = ohd->error;
	if (error) {
		fuse_dbgipc(fip, error, "ipc_error");
		fuse_ipc_put(fip);
		/* FUSE replies carry negative errno values. */
		if (error < 0)
			error = -error;
		return error;
	}
	fuse_dbgipc(fip, 0, "done");

	return 0;
}

/*
 * Send the request in fip without expecting a reply: queue it on the
 * request list only, wake the server, and wait just until it has been
 * read (fuse_ipc_wait_sent()).  Consumes the caller's reference on
 * error; on success the caller still owns fip.
 */
int
fuse_ipc_tx_noreply(struct fuse_ipc *fip)
{
	struct fuse_mount *fmp = fip->fmp;
	int error;

	if (fuse_test_dead(fmp)) {
		fuse_ipc_put(fip);
		return ENOTCONN;
	}

	mtx_lock(&fmp->ipc_lock);
	TAILQ_INSERT_TAIL(&fmp->request_head, fip, request_entry);
	mtx_unlock(&fmp->ipc_lock);

	wakeup(fmp);
	KNOTE(&fmp->kq.ki_note, 0);

	error = fuse_ipc_wait_sent(fip);
	if (error)
		fuse_ipc_put(fip);
	return error;
}

/*
 * Create the fuse_ipc object cache (zero-filling malloc backend).
 * Called once at module/subsystem initialization.
 */
void
fuse_ipc_init(void)
{
	fuse_ipc_objcache = objcache_create("fuse_ipc", 0, 0,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc_zero, objcache_malloc_free, &fuse_ipc_args);
}

/* Tear down the fuse_ipc object cache at unload. */
void
fuse_ipc_cleanup(void)
{
	objcache_destroy(fuse_ipc_objcache);
}