Lines matching full:group — identifier-search hits from the mlx4 IB multicast group (mcg) code. Each entry gives the source line number, the matching line, and either the symbol's role ("argument", "local", "member") or the enclosing function.
50 #define mcg_warn_group(group, format, arg...) \ argument
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_debug_group(group, format, arg...) \ argument
56 (group)->name, (group)->demux->port, ## arg)
58 #define mcg_error_group(group, format, arg...) \ argument
59 pr_err(" %16s: " format, (group)->name, ## arg)
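The hits at 50-59 are the per-group logging macros; only lines containing "group" are listed, so parts of the macro bodies are omitted. They follow the usual kernel pattern of a thin variadic wrapper around pr_warn()/pr_err() that stamps every message with the group name and port. A minimal sketch of that pattern, with an illustrative object type rather than struct mcast_group:

        #include <linux/printk.h>

        /* Illustrative only: "obj" is assumed to expose ->name and ->port. */
        #define obj_warn(obj, format, arg...) \
                pr_warn("%16s (port %d): " format, (obj)->name, (obj)->port, ## arg)

The named variadic parameter together with "## arg" lets the macro accept calls with no extra arguments, which is why every hit above ends with ", ## arg)".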
136 struct mcast_group *group; member
144 mcg_warn_group(group, "did not expect to reach zero\n"); \
166 struct mcast_group *group; in mcast_find() local
170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
171 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
173 return group; in mcast_find()
184 struct mcast_group *group) in mcast_insert() argument
195 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
196 sizeof group->rec.mgid); in mcast_insert()
204 rb_link_node(&group->node, parent, link); in mcast_insert()
205 rb_insert_color(&group->node, &ctx->mcg_table); in mcast_insert()
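mcast_find() and mcast_insert() (hits 166-205) use the standard kernel red-black-tree idiom: walk the rb_node child pointers from the root while comparing the MGID key, then either return the existing node or call rb_link_node() and rb_insert_color() on the new one. A self-contained sketch of the insert half, keyed by an int for brevity (struct item and its fields are placeholders, not the driver's types):

        #include <linux/rbtree.h>

        struct item {
                struct rb_node node;
                int key;
        };

        /*
         * Returns the already-present item with the same key, or NULL after
         * linking and recoloring the new node -- the contract the
         * mcast_insert() hits above suggest.
         */
        static struct item *item_insert(struct rb_root *root, struct item *new)
        {
                struct rb_node **link = &root->rb_node, *parent = NULL;

                while (*link) {
                        struct item *cur = rb_entry(*link, struct item, node);

                        parent = *link;
                        if (new->key < cur->key)
                                link = &(*link)->rb_left;
                        else if (new->key > cur->key)
                                link = &(*link)->rb_right;
                        else
                                return cur;     /* duplicate key */
                }
                rb_link_node(&new->node, parent, link);
                rb_insert_color(&new->node, root);
                return NULL;
        }

The lookup side (mcast_find) is the same walk without the final link/recolor step; the driver compares MGIDs with memcmp() instead of an integer key.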
252 static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad) in send_join_to_wire() argument
262 sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0]; in send_join_to_wire()
265 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_join_to_wire()
266 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_join_to_wire()
268 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_join_to_wire()
272 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_join_to_wire()
279 static int send_leave_to_wire(struct mcast_group *group, u8 join_state) in send_leave_to_wire() argument
292 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_leave_to_wire()
293 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_leave_to_wire()
301 *sa_data = group->rec; in send_leave_to_wire()
304 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_leave_to_wire()
306 group->state = MCAST_IDLE; in send_leave_to_wire()
311 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_leave_to_wire()
318 static int send_reply_to_slave(int slave, struct mcast_group *group, in send_reply_to_slave() argument
341 *sa_data = group->rec; in send_reply_to_slave()
345 sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f); in send_reply_to_slave()
348 ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad); in send_reply_to_slave()
386 /* src is group record, dst is request record */ in cmp_rec()
435 /* release group, return 1 if this was last release and group is destroyed
437 static int release_group(struct mcast_group *group, int from_timeout_handler) in release_group() argument
439 struct mlx4_ib_demux_ctx *ctx = group->demux; in release_group()
443 mutex_lock(&group->lock); in release_group()
444 if (atomic_dec_and_test(&group->refcount)) { in release_group()
446 if (group->state != MCAST_IDLE && in release_group()
447 !cancel_delayed_work(&group->timeout_work)) { in release_group()
448 atomic_inc(&group->refcount); in release_group()
449 mutex_unlock(&group->lock); in release_group()
455 nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0); in release_group()
457 del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in release_group()
458 if (!list_empty(&group->pending_list)) in release_group()
459 mcg_warn_group(group, "releasing a group with non empty pending list\n"); in release_group()
461 rb_erase(&group->node, &ctx->mcg_table); in release_group()
462 list_del_init(&group->mgid0_list); in release_group()
463 mutex_unlock(&group->lock); in release_group()
465 kfree(group); in release_group()
468 mutex_unlock(&group->lock); in release_group()
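release_group() (hits 437-468) drops a reference under the group lock and, on the final reference, has to cope with a timeout that may already be running: if cancel_delayed_work() fails while the group is not idle, it re-takes the reference and backs out instead of freeing memory the timeout handler is still using. A hedged sketch of that shape (struct obj and the "busy" flag stand in for the driver's group state machine):

        #include <linux/atomic.h>
        #include <linux/mutex.h>
        #include <linux/slab.h>
        #include <linux/types.h>
        #include <linux/workqueue.h>

        struct obj {
                atomic_t refcount;
                struct mutex lock;
                struct delayed_work timeout_work;
                bool busy;              /* stand-in for "state != IDLE" */
        };

        /* Returns 1 if this call freed the object, 0 otherwise. */
        static int obj_put(struct obj *o)
        {
                mutex_lock(&o->lock);
                if (atomic_dec_and_test(&o->refcount)) {
                        if (o->busy && !cancel_delayed_work(&o->timeout_work)) {
                                /* timeout handler is running; let it drop the ref */
                                atomic_inc(&o->refcount);
                                mutex_unlock(&o->lock);
                                return 0;
                        }
                        mutex_unlock(&o->lock);
                        kfree(o);
                        return 1;
                }
                mutex_unlock(&o->lock);
                return 0;
        }

The real function also removes the group from the rb tree and its sysfs entry before freeing, as the hits at 457-463 show.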
474 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) in adjust_membership() argument
480 group->members[i] += inc; in adjust_membership()
483 static u8 get_leave_state(struct mcast_group *group) in get_leave_state() argument
489 if (!group->members[i]) in get_leave_state()
492 return leave_state & (group->rec.scope_join_state & 0xf); in get_leave_state()
495 static int join_group(struct mcast_group *group, int slave, u8 join_mask) in join_group() argument
501 join_state = join_mask & (~group->func[slave].join_state); in join_group()
502 adjust_membership(group, join_state, 1); in join_group()
503 group->func[slave].join_state |= join_state; in join_group()
504 if (group->func[slave].state != MCAST_MEMBER && join_state) { in join_group()
505 group->func[slave].state = MCAST_MEMBER; in join_group()
511 static int leave_group(struct mcast_group *group, int slave, u8 leave_state) in leave_group() argument
515 adjust_membership(group, leave_state, -1); in leave_group()
516 group->func[slave].join_state &= ~leave_state; in leave_group()
517 if (!group->func[slave].join_state) { in leave_group()
518 group->func[slave].state = MCAST_NOT_MEMBER; in leave_group()
524 static int check_leave(struct mcast_group *group, int slave, u8 leave_mask) in check_leave() argument
526 if (group->func[slave].state != MCAST_MEMBER) in check_leave()
530 if (~group->func[slave].join_state & leave_mask) in check_leave()
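adjust_membership(), get_leave_state(), join_group() and leave_group() (hits 474-530) keep one counter per join-state bit so the group knows when the last function holding a given bit has gone. A sketch of that bookkeeping, assuming a three-bit join-state mask; the struct and names are illustrative:

        #include <linux/types.h>

        struct membership {
                int members[3];         /* one counter per join-state bit */
        };

        static void adjust_membership(struct membership *m, u8 join_state, int inc)
        {
                int i;

                for (i = 0; i < 3; i++, join_state >>= 1)
                        if (join_state & 0x1)
                                m->members[i] += inc;
        }

        /*
         * Bits whose counters have reached zero are candidates for a leave,
         * masked by the join state currently registered with the SA.
         */
        static u8 get_leave_state(const struct membership *m, u8 cur_join_state)
        {
                u8 leave_state = 0;
                int i;

                for (i = 0; i < 3; i++)
                        if (!m->members[i])
                                leave_state |= (1 << i);

                return leave_state & (cur_join_state & 0xf);
        }

join_group() then only adds the bits the slave did not already hold, and leave_group() clears them and flips the per-slave state back to non-member once no bits remain, as the hits at 501-518 show.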
542 struct mcast_group *group; in mlx4_ib_mcg_timeout_handler() local
545 group = container_of(delay, typeof(*group), timeout_work); in mlx4_ib_mcg_timeout_handler()
547 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
548 if (group->state == MCAST_JOIN_SENT) { in mlx4_ib_mcg_timeout_handler()
549 if (!list_empty(&group->pending_list)) { in mlx4_ib_mcg_timeout_handler()
550 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in mlx4_ib_mcg_timeout_handler()
553 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_timeout_handler()
554 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
556 if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { in mlx4_ib_mcg_timeout_handler()
557 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
560 kfree(group); in mlx4_ib_mcg_timeout_handler()
563 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
565 mcg_warn_group(group, "DRIVER BUG\n"); in mlx4_ib_mcg_timeout_handler()
566 } else if (group->state == MCAST_LEAVE_SENT) { in mlx4_ib_mcg_timeout_handler()
567 if (group->rec.scope_join_state & 0xf) in mlx4_ib_mcg_timeout_handler()
568 group->rec.scope_join_state &= 0xf0; in mlx4_ib_mcg_timeout_handler()
569 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
570 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
571 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
573 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
575 mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state)); in mlx4_ib_mcg_timeout_handler()
576 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
577 atomic_inc(&group->refcount); in mlx4_ib_mcg_timeout_handler()
578 if (!queue_work(group->demux->mcg_wq, &group->work)) in mlx4_ib_mcg_timeout_handler()
579 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_timeout_handler()
581 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
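mlx4_ib_mcg_timeout_handler() (hits 542-581) is a delayed-work callback: it climbs from the work_struct back to its mcast_group with container_of(), takes the group lock and dispatches on group->state. A minimal sketch of that recovery step, with the state handling elided (handler and struct names are illustrative):

        #include <linux/kernel.h>
        #include <linux/mutex.h>
        #include <linux/workqueue.h>

        struct obj {
                struct mutex lock;
                struct delayed_work timeout_work;
                int state;
        };

        static void obj_timeout_handler(struct work_struct *work)
        {
                /* "work" is the work_struct embedded in timeout_work */
                struct delayed_work *delay = to_delayed_work(work);
                struct obj *o = container_of(delay, struct obj, timeout_work);

                mutex_lock(&o->lock);
                /* ... inspect o->state and drop the timed-out request ... */
                mutex_unlock(&o->lock);
        }

The handler is armed by the queue_delayed_work() calls visible in send_join_to_wire() and send_leave_to_wire() (hits 272 and 311).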
584 static int handle_leave_req(struct mcast_group *group, u8 leave_mask, in handle_leave_req() argument
590 leave_mask = group->func[req->func].join_state; in handle_leave_req()
592 status = check_leave(group, req->func, leave_mask); in handle_leave_req()
594 leave_group(group, req->func, leave_mask); in handle_leave_req()
597 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_leave_req()
598 --group->func[req->func].num_pend_reqs; in handle_leave_req()
605 static int handle_join_req(struct mcast_group *group, u8 join_mask, in handle_join_req() argument
608 u8 group_join_state = group->rec.scope_join_state & 0xf; in handle_join_req()
615 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); in handle_join_req()
617 join_group(group, req->func, join_mask); in handle_join_req()
619 --group->func[req->func].num_pend_reqs; in handle_join_req()
620 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_join_req()
627 group->prev_state = group->state; in handle_join_req()
628 if (send_join_to_wire(group, &req->sa_mad)) { in handle_join_req()
629 --group->func[req->func].num_pend_reqs; in handle_join_req()
634 group->state = group->prev_state; in handle_join_req()
636 group->state = MCAST_JOIN_SENT; in handle_join_req()
644 struct mcast_group *group; in mlx4_ib_mcg_work_handler() local
652 group = container_of(work, typeof(*group), work); in mlx4_ib_mcg_work_handler()
654 mutex_lock(&group->lock); in mlx4_ib_mcg_work_handler()
656 /* First, let's see if a response from SM is waiting regarding this group. in mlx4_ib_mcg_work_handler()
657 * If so, we need to update the group's REC. If this is a bad response, we in mlx4_ib_mcg_work_handler()
660 if (group->state == MCAST_RESP_READY) { in mlx4_ib_mcg_work_handler()
662 cancel_delayed_work(&group->timeout_work); in mlx4_ib_mcg_work_handler()
663 status = be16_to_cpu(group->response_sa_mad.mad_hdr.status); in mlx4_ib_mcg_work_handler()
664 method = group->response_sa_mad.mad_hdr.method; in mlx4_ib_mcg_work_handler()
665 if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) { in mlx4_ib_mcg_work_handler()
666 …mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, g… in mlx4_ib_mcg_work_handler()
668 group->response_sa_mad.mad_hdr.tid), in mlx4_ib_mcg_work_handler()
669 (long long)be64_to_cpu(group->last_req_tid)); in mlx4_ib_mcg_work_handler()
670 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
674 if (!list_empty(&group->pending_list)) in mlx4_ib_mcg_work_handler()
675 req = list_first_entry(&group->pending_list, in mlx4_ib_mcg_work_handler()
679 send_reply_to_slave(req->func, group, &req->sa_mad, status); in mlx4_ib_mcg_work_handler()
680 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_work_handler()
686 mcg_warn_group(group, "no request for failed join\n"); in mlx4_ib_mcg_work_handler()
687 } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing) in mlx4_ib_mcg_work_handler()
694 group->response_sa_mad.data)->scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
695 cur_join_state = group->rec.scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
703 memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec); in mlx4_ib_mcg_work_handler()
705 group->state = MCAST_IDLE; in mlx4_ib_mcg_work_handler()
710 while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
711 req = list_first_entry(&group->pending_list, struct mcast_req, in mlx4_ib_mcg_work_handler()
720 rc += handle_leave_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
722 rc += handle_join_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
726 if (group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
727 req_join_state = get_leave_state(group); in mlx4_ib_mcg_work_handler()
729 group->rec.scope_join_state &= ~req_join_state; in mlx4_ib_mcg_work_handler()
730 group->prev_state = group->state; in mlx4_ib_mcg_work_handler()
731 if (send_leave_to_wire(group, req_join_state)) { in mlx4_ib_mcg_work_handler()
732 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
735 group->state = MCAST_LEAVE_SENT; in mlx4_ib_mcg_work_handler()
739 if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) in mlx4_ib_mcg_work_handler()
741 mutex_unlock(&group->lock); in mlx4_ib_mcg_work_handler()
744 release_group(group, 0); in mlx4_ib_mcg_work_handler()
751 struct mcast_group *group = NULL, *cur_group, *n; in search_relocate_mgid0_group() local
755 list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) { in search_relocate_mgid0_group()
756 mutex_lock(&group->lock); in search_relocate_mgid0_group()
757 if (group->last_req_tid == tid) { in search_relocate_mgid0_group()
759 group->rec.mgid = *new_mgid; in search_relocate_mgid0_group()
760 sprintf(group->name, "%016llx%016llx", in search_relocate_mgid0_group()
761 (long long)be64_to_cpu(group->rec.mgid.global.subnet_prefix), in search_relocate_mgid0_group()
762 (long long)be64_to_cpu(group->rec.mgid.global.interface_id)); in search_relocate_mgid0_group()
763 list_del_init(&group->mgid0_list); in search_relocate_mgid0_group()
764 cur_group = mcast_insert(ctx, group); in search_relocate_mgid0_group()
767 req = list_first_entry(&group->pending_list, in search_relocate_mgid0_group()
769 --group->func[req->func].num_pend_reqs; in search_relocate_mgid0_group()
773 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
775 release_group(group, 0); in search_relocate_mgid0_group()
779 atomic_inc(&group->refcount); in search_relocate_mgid0_group()
780 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in search_relocate_mgid0_group()
781 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
783 return group; in search_relocate_mgid0_group()
787 list_del(&group->mgid0_list); in search_relocate_mgid0_group()
788 if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE) in search_relocate_mgid0_group()
789 cancel_delayed_work_sync(&group->timeout_work); in search_relocate_mgid0_group()
791 list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) { in search_relocate_mgid0_group()
795 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
797 kfree(group); in search_relocate_mgid0_group()
801 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
815 struct mcast_group *group, *cur_group; in acquire_group() local
821 group = mcast_find(ctx, mgid); in acquire_group()
822 if (group) in acquire_group()
829 group = kzalloc(sizeof *group, gfp_mask); in acquire_group()
830 if (!group) in acquire_group()
833 group->demux = ctx; in acquire_group()
834 group->rec.mgid = *mgid; in acquire_group()
835 INIT_LIST_HEAD(&group->pending_list); in acquire_group()
836 INIT_LIST_HEAD(&group->mgid0_list); in acquire_group()
838 INIT_LIST_HEAD(&group->func[i].pending); in acquire_group()
839 INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); in acquire_group()
840 INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler); in acquire_group()
841 mutex_init(&group->lock); in acquire_group()
842 sprintf(group->name, "%016llx%016llx", in acquire_group()
844 group->rec.mgid.global.subnet_prefix), in acquire_group()
846 group->rec.mgid.global.interface_id)); in acquire_group()
847 sysfs_attr_init(&group->dentry.attr); in acquire_group()
848 group->dentry.show = sysfs_show_group; in acquire_group()
849 group->dentry.store = NULL; in acquire_group()
850 group->dentry.attr.name = group->name; in acquire_group()
851 group->dentry.attr.mode = 0400; in acquire_group()
852 group->state = MCAST_IDLE; in acquire_group()
855 list_add(&group->mgid0_list, &ctx->mcg_mgid0_list); in acquire_group()
859 cur_group = mcast_insert(ctx, group); in acquire_group()
861 mcg_warn("group just showed up %s - confused\n", cur_group->name); in acquire_group()
862 kfree(group); in acquire_group()
866 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in acquire_group()
869 atomic_inc(&group->refcount); in acquire_group()
870 return group; in acquire_group()
875 struct mcast_group *group = req->group; in queue_req() local
877 atomic_inc(&group->refcount); /* for the request */ in queue_req()
878 atomic_inc(&group->refcount); /* for scheduling the work */ in queue_req()
879 list_add_tail(&req->group_list, &group->pending_list); in queue_req()
880 list_add_tail(&req->func_list, &group->func[req->func].pending); in queue_req()
882 if (!queue_work(group->demux->mcg_wq, &group->work)) in queue_req()
883 safe_atomic_dec(&group->refcount); in queue_req()
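queue_req() (hits 875-883) takes two group references, one owned by the queued request and one owned by the work item it schedules, and immediately drops the second if queue_work() reports the work was already pending. A sketch of that idiom; the caller is assumed to hold the object's lock, and the driver's safe_atomic_dec() wrapper is replaced by a plain atomic_dec():

        #include <linux/atomic.h>
        #include <linux/list.h>
        #include <linux/workqueue.h>

        struct obj {
                atomic_t refcount;
                struct list_head pending_list;
                struct work_struct work;
                struct workqueue_struct *wq;
        };

        struct request {
                struct list_head list;
                struct obj *obj;
        };

        static void obj_queue_request(struct request *req)
        {
                struct obj *o = req->obj;

                atomic_inc(&o->refcount);       /* held by the request */
                atomic_inc(&o->refcount);       /* held by the scheduled work */
                list_add_tail(&req->list, &o->pending_list);
                /*
                 * queue_work() returns false when the work item is already
                 * queued; in that case the second reference is not needed.
                 */
                if (!queue_work(o->wq, &o->work))
                        atomic_dec(&o->refcount);
        }

The same "take a ref, queue the work, drop it if queueing was not needed" pattern appears again in the timeout handler (577-579) and in mlx4_ib_mcg_demux_handler() (917-919).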
892 struct mcast_group *group; in mlx4_ib_mcg_demux_handler() local
898 group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL); in mlx4_ib_mcg_demux_handler()
900 if (IS_ERR(group)) { in mlx4_ib_mcg_demux_handler()
903 *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */ in mlx4_ib_mcg_demux_handler()
904 group = search_relocate_mgid0_group(ctx, tid, &rec->mgid); in mlx4_ib_mcg_demux_handler()
906 group = NULL; in mlx4_ib_mcg_demux_handler()
909 if (!group) in mlx4_ib_mcg_demux_handler()
912 mutex_lock(&group->lock); in mlx4_ib_mcg_demux_handler()
913 group->response_sa_mad = *mad; in mlx4_ib_mcg_demux_handler()
914 group->prev_state = group->state; in mlx4_ib_mcg_demux_handler()
915 group->state = MCAST_RESP_READY; in mlx4_ib_mcg_demux_handler()
917 atomic_inc(&group->refcount); in mlx4_ib_mcg_demux_handler()
918 if (!queue_work(ctx->mcg_wq, &group->work)) in mlx4_ib_mcg_demux_handler()
919 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_demux_handler()
920 mutex_unlock(&group->lock); in mlx4_ib_mcg_demux_handler()
921 release_group(group, 0); in mlx4_ib_mcg_demux_handler()
941 struct mcast_group *group; in mlx4_ib_mcg_multiplex_handler() local
960 group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL); in mlx4_ib_mcg_multiplex_handler()
962 if (IS_ERR(group)) { in mlx4_ib_mcg_multiplex_handler()
964 return PTR_ERR(group); in mlx4_ib_mcg_multiplex_handler()
966 mutex_lock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
967 if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { in mlx4_ib_mcg_multiplex_handler()
968 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
969 mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", in mlx4_ib_mcg_multiplex_handler()
971 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
975 ++group->func[slave].num_pend_reqs; in mlx4_ib_mcg_multiplex_handler()
976 req->group = group; in mlx4_ib_mcg_multiplex_handler()
978 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
979 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
996 struct mcast_group *group = in sysfs_show_group() local
1004 if (group->state == MCAST_IDLE) in sysfs_show_group()
1005 sprintf(state_str, "%s", get_state_string(group->state)); in sysfs_show_group()
1008 get_state_string(group->state), in sysfs_show_group()
1009 (long long)be64_to_cpu(group->last_req_tid)); in sysfs_show_group()
1010 if (list_empty(&group->pending_list)) { in sysfs_show_group()
1013 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in sysfs_show_group()
1019 group->rec.scope_join_state & 0xf, in sysfs_show_group()
1020 group->members[2], group->members[1], group->members[0], in sysfs_show_group()
1021 atomic_read(&group->refcount), in sysfs_show_group()
1025 if (group->func[f].state == MCAST_MEMBER) in sysfs_show_group()
1027 f, group->func[f].join_state); in sysfs_show_group()
1031 be16_to_cpu(group->rec.pkey), in sysfs_show_group()
1032 be32_to_cpu(group->rec.qkey), in sysfs_show_group()
1033 (group->rec.mtusel_mtu & 0xc0) >> 6, in sysfs_show_group()
1034 group->rec.mtusel_mtu & 0x3f, in sysfs_show_group()
1035 group->rec.tclass, in sysfs_show_group()
1036 (group->rec.ratesel_rate & 0xc0) >> 6, in sysfs_show_group()
1037 group->rec.ratesel_rate & 0x3f, in sysfs_show_group()
1038 (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28, in sysfs_show_group()
1039 (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8, in sysfs_show_group()
1040 be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff, in sysfs_show_group()
1041 group->rec.proxy_join); in sysfs_show_group()
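sysfs_show_group() (hits 996-1041) backs the read-only attribute set up in acquire_group(): sysfs_attr_init(), a dynamically generated name, mode 0400 and store = NULL, with the show callback formatting the group state and MCMemberRecord fields into the page buffer. A generic sketch of a read-only attribute in that style, using a plain device_attribute instead of the driver's per-port attribute type:

        #include <linux/device.h>
        #include <linux/kernel.h>
        #include <linux/sysfs.h>

        struct obj {
                int state;
                char name[33];
                struct device_attribute dentry;
        };

        static ssize_t obj_state_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
        {
                struct obj *o = container_of(attr, struct obj, dentry);

                /* sysfs hands us one page; sysfs_emit() enforces the bound */
                return sysfs_emit(buf, "%d\n", o->state);
        }

        static void obj_init_attr(struct obj *o)
        {
                sysfs_attr_init(&o->dentry.attr);
                o->dentry.attr.name = o->name;  /* per-object file name */
                o->dentry.attr.mode = 0400;     /* owner (root) read-only */
                o->dentry.show = obj_state_show;
                o->dentry.store = NULL;
        }

The driver registers and removes the attribute with its own add_sysfs_port_mcg_attr()/del_sysfs_port_mcg_attr() helpers (hits 457, 780, 866, 1072) rather than device_create_file().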
1064 static void force_clean_group(struct mcast_group *group) in force_clean_group() argument
1068 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { in force_clean_group()
1072 del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr); in force_clean_group()
1073 rb_erase(&group->node, &group->demux->mcg_table); in force_clean_group()
1074 kfree(group); in force_clean_group()
1081 struct mcast_group *group; in _mlx4_ib_mcg_port_cleanup() local
1107 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1108 if (atomic_read(&group->refcount)) in _mlx4_ib_mcg_port_cleanup()
1109 …mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group); in _mlx4_ib_mcg_port_cleanup()
1111 force_clean_group(group); in _mlx4_ib_mcg_port_cleanup()
1167 static void clear_pending_reqs(struct mcast_group *group, int vf) in clear_pending_reqs() argument
1173 if (!list_empty(&group->pending_list)) in clear_pending_reqs()
1174 group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list); in clear_pending_reqs()
1176 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { in clear_pending_reqs()
1179 (group->state == MCAST_JOIN_SENT || in clear_pending_reqs()
1180 group->state == MCAST_LEAVE_SENT)) { in clear_pending_reqs()
1181 clear = cancel_delayed_work(&group->timeout_work); in clear_pending_reqs()
1183 group->state = MCAST_IDLE; in clear_pending_reqs()
1186 --group->func[vf].num_pend_reqs; in clear_pending_reqs()
1190 atomic_dec(&group->refcount); in clear_pending_reqs()
1194 if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) { in clear_pending_reqs()
1195 mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n", in clear_pending_reqs()
1196 list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs); in clear_pending_reqs()
1200 static int push_deleteing_req(struct mcast_group *group, int slave) in push_deleteing_req() argument
1205 if (!group->func[slave].join_state) in push_deleteing_req()
1210 mcg_warn_group(group, "failed allocation - may leave stall groups\n"); in push_deleteing_req()
1214 if (!list_empty(&group->func[slave].pending)) { in push_deleteing_req()
1215 pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); in push_deleteing_req()
1224 req->group = group; in push_deleteing_req()
1225 ++group->func[slave].num_pend_reqs; in push_deleteing_req()
1233 struct mcast_group *group; in clean_vf_mcast() local
1238 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
1239 mutex_lock(&group->lock); in clean_vf_mcast()
1240 if (atomic_read(&group->refcount)) { in clean_vf_mcast()
1242 clear_pending_reqs(group, slave); in clean_vf_mcast()
1243 push_deleteing_req(group, slave); in clean_vf_mcast()
1245 mutex_unlock(&group->lock); in clean_vf_mcast()