/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <sys/filio.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

struct ucma_file {
	struct mutex mut;
	struct file *filp;
	struct list_head ctx_list;
	struct list_head event_list;
	wait_queue_head_t poll_wait;
	struct workqueue_struct *close_wq;
};

struct ucma_context {
	int id;
	struct completion comp;
	atomic_t ref;
	int events_reported;
	int backlog;

	struct ucma_file *file;
	struct rdma_cm_id *cm_id;
	u64 uid;

	struct list_head list;
	struct list_head mc_list;
	/* Set when the owning device is tearing down its internal HW
	 * resources; protected by the global mut.
	 */
	int closing;
	/* Synchronizes the removal event with id destruction; protected by
	 * the file mut.
	 */
	int destroying;
	struct work_struct close_work;
};

struct ucma_multicast {
	struct ucma_context *ctx;
	int id;
	int events_reported;

	u64 uid;
	u8 join_state;
	struct list_head list;
	struct sockaddr_storage addr;
};

struct ucma_event {
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct list_head list;
	struct rdma_cm_id *cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
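
/*
 * Locking overview: the global "mut" protects the two IDRs and the
 * ctx->closing flag (context lookups via _ucma_find_context() are done with
 * it held), while each file's "mut" protects that file's ctx_list,
 * event_list and the ctx->destroying flag.  Contexts are reference counted;
 * the final ucma_put_ctx() completes ctx->comp so that destroy paths can
 * wait for inflight command handlers to drain.
 */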
static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx(), but also requires that ->cm_id->device is valid,
 * i.e. that the CM_ID is bound to a device.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources.  The context itself stays alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}
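
/*
 * Allocate a new context, publish it in the global context IDR and link it
 * onto the owning file's ctx_list.  Callers hold file->mut while the context
 * is being linked.
 */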
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = __DECONST(struct ucma_multicast *,
		    event->param.ud.private_data);
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only if the context owns this cm_id can it be queued for closing.
	 * Otherwise the cm_id is an inflight one that still sits on the
	 * context's event list, waiting to be detached and reattached to a
	 * new context by ucma_get_event(); that case is handled separately
	 * below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
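
/*
 * rdma_cm event callback.  Translates the kernel event into a
 * rdma_ucm_event_resp, queues it on the owning file's event list and wakes
 * up any poller.  Connect requests consume one slot of the listen backlog;
 * device-removal events additionally trigger the removal handler above.
 */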
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.  However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
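
/*
 * RDMA_USER_CM_CMD_GET_EVENT: hand the oldest queued event to userspace.
 * Blocks unless the file was opened O_NONBLOCK.  A connect request gets a
 * freshly allocated context so that userspace can accept or reject it
 * independently of the listening id.
 */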
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_id(TD_TO_VNET(curthread),
			       ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}
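
/*
 * Drop all multicast joins still attached to @ctx.  The multicast IDR is
 * updated under the global mut; the entries themselves are freed here.
 */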
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed.  At
 * this point, no new events will be reported from the hardware.  However, we
 * still need to clean up the UCMA context for this ID.  Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock.  We therefore extract all
 * relevant events from the context's pending event list while holding the
 * mutex.  After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/* At this point it is guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
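
/*
 * The bind and address-resolution handlers below validate the sockaddr sizes
 * supplied by userspace (via rdma_addr_size*()) before handing the addresses
 * to the rdma_cm core.
 */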
static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
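
/*
 * RDMA_USER_CM_CMD_QUERY_ROUTE: return the bound source/destination
 * addresses plus transport specific route information (IB path records,
 * RoCE GIDs or the iWARP device address).
 */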
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
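
/*
 * Return the packed IB path records for this id.  The number of entries
 * copied is limited by both route.num_paths and the out_len supplied by
 * userspace.
 */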
static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
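
/*
 * Report source and destination as AF_IB sockaddrs.  If the id is not bound
 * to AF_IB addresses, synthesize them from the resolved GIDs, P_Key and
 * service id.
 */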
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
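
/*
 * RDMA_USER_CM_CMD_LISTEN: a zero or out-of-range backlog from userspace is
 * clamped to max_backlog before the id is put into the listening state.
 */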
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
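
/*
 * Per-id option handlers.  Each option checks that optlen matches the
 * expected size before applying the value to the underlying cm_id.
 */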
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}
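
/*
 * Common multicast join path.  Allocates a ucma_multicast tracking structure,
 * joins the group via rdma_join_multicast() and returns the multicast id to
 * userspace; the join result itself is delivered later as an event.
 */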
static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long) cmd->response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}
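
/*
 * RDMA_USER_CM_CMD_LEAVE_MCAST: drop the membership, flush any multicast
 * events still queued for it and report how many were already delivered.
 */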
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
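
/*
 * RDMA_USER_CM_CMD_MIGRATE_ID: move a cm_id, together with any events already
 * queued for it, from the file it is currently associated with to the file
 * issuing this command, so the id can be handled on a different fd/event
 * channel.
 */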
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT] = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION] = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY] = ucma_query,
	[RDMA_USER_CM_CMD_BIND] = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
};
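
/*
 * Userspace drives this driver by write(2)-ing a struct rdma_ucm_cmd_hdr
 * followed by the command-specific payload to the character device.  A
 * minimal sketch, assuming the uapi layout from <rdma/rdma_user_cm.h>:
 *
 *	struct rdma_ucm_cmd_hdr hdr = {
 *		.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *		.in  = sizeof(struct rdma_ucm_create_id),
 *		.out = sizeof(struct rdma_ucm_create_id_resp),
 *	};
 *	// The hdr is immediately followed in the write buffer by the
 *	// rdma_ucm_create_id payload; the handler writes its response to
 *	// the user address carried in the payload's "response" field.
 */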
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* At this point ctx has been marked as destroying and the
		 * workqueue has been flushed, so we are safe from any inflight
		 * handler that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			ucma_put_ctx(ctx);
			wait_for_completion(&ctx->comp);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static long
ucma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return (0);
	default:
		return (-ENOTTY);
	}
}

static const struct file_operations ucma_fops = {
	.owner		= THIS_MODULE,
	.open		= ucma_open,
	.release	= ucma_close,
	.write		= ucma_write,
	.unlocked_ioctl	= ucma_ioctl,
	.poll		= ucma_poll,
	.llseek		= no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	return 0;
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init_order(ucma_init, SI_ORDER_FIFTH);
module_exit_order(ucma_cleanup, SI_ORDER_FIFTH);