/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This module provides for the management of interconnect adapters,
 * inter-node connections (aka paths), and IPC. Adapter descriptors are
 * maintained on a linked list; one list per adapter devname. Each
 * adapter descriptor heads a linked list of path descriptors. There is
 * also a linked list of ipc_info descriptors; one for each node. Each
 * ipc_info descriptor heads a circular list of ipc tokens (the tokens are
 * embedded within a path descriptor). The tokens are used in round-robin
 * fashion.
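 *
 * The descriptor linkage described above, for reference:
 *
 *	adapter_listhead_base -> listhead(devname A) -> listhead(devname B)
 *	                              |
 *	                          adapter -> adapter -> ...
 *	                              |
 *	                            path -> path -> ...
 *
 *	ipc_info(node 1) -> ipc_info(node 2) -> ...
 *	     |
 *	     +-> sendq token <-> sendq token <-> ... (circular; each token
 *	         is embedded in a path descriptor and selected round-robin)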
 *
 *
 * The exported interface consists of the following functions:
 *	- rsmka_add_adapter
 *	- rsmka_remove_adapter
 *
 *	[add_path and remove_path are only called for existing adapters]
 *	- rsmka_add_path
 *	- rsmka_remove_path	[a path down request is implicit]
 *
 *	- rsmka_path_up		[called at clock ipl for Sun Cluster]
 *	- rsmka_path_down	[called at clock ipl for Sun Cluster]
 *	- rsmka_disconnect_node	[called at clock ipl for Sun Cluster;
 *				treat like path-down for all node paths;
 *				can be before node_alive; always before
 *				node_died.]
 *
 *	[node_alive and node_died are always paired]
 *	- rsmka_node_alive	called after the first cluster path is up
 *				for this node
 *	- rsmka_node_died
 *
 *	[set the local node id]
 *	- rsmka_set_my_nodeid	called to set the variable my_nodeid to the
 *				local node id
 *
 * Processing for these functions is set up as a state machine supported
 * by the data structures described above.
 *
 * For Sun Cluster these are called from the Path-Manager/Kernel-Agent
 * Interface (rsmka_pm_interface.cc).
 *
 * The functions rsmka_path_up, rsmka_path_down, and rsmka_disconnect_node
 * are called at clock interrupt level from the Path-Manager/Kernel-Agent
 * Interface, which precludes sleeping; so these functions may (optionally)
 * defer processing to an independent thread running at normal ipl.
 *
 *
 * lock definitions:
 *
 *	(mutex) work_queue.work_mutex
 *		protects the linked list of work tokens and is used
 *		with cv_wait/cv_signal for thread synchronization.
 *		No other locks are acquired while it is held.
 *
 *	(mutex) adapter_listhead_base.listlock
 *		protects the linked list of adapter listheads.
 *		Always acquired before listhead->mutex.
 *
 *
 *	(mutex) ipc_info_lock
 *		protects the ipc_info list and sendq token lists.
 *		Always acquired before listhead->mutex.
 *
 *	(mutex) listhead->mutex
 *		protects the adapter listhead, the linked list of
 *		adapters, and the linked list of paths.
 *
 *	(mutex) path->mutex
 *		protects the path descriptor.
 *		work_queue.work_mutex may be acquired when holding
 *		this lock.
 *
 *	(mutex) adapter->mutex
 *		protects adapter descriptor contents; used
 *		mainly for ref_cnt updates.
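 *
 * Implied lock ordering, derived from the rules above:
 *
 *	adapter_listhead_base.listlock	->  listhead->mutex
 *	ipc_info_lock			->  listhead->mutex
 *	path->mutex			->  work_queue.work_mutex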
 */

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/devops.h>
#include <sys/ddi_impldefs.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/taskq.h>
#include <sys/callb.h>

#include <sys/rsm/rsm.h>
#include <rsm_in.h>
#include <sys/rsm/rsmka_path_int.h>

extern void _cplpl_init();
extern void _cplpl_fini();
extern pri_t maxclsyspri;
extern int rsm_hash_size;

extern rsm_node_id_t my_nodeid;
extern rsmhash_table_t rsm_import_segs;
extern rsm_intr_hand_ret_t rsm_srv_func();
extern void rsmseg_unload(rsmseg_t *);
extern void rsm_suspend_complete(rsm_node_id_t src_node, int flag);
extern int rsmipc_send_controlmsg(path_t *path, int msgtype);
extern void rsmka_path_monitor_initialize();
extern void rsmka_path_monitor_terminate();

extern adapter_t loopback_adapter;
/*
 * Lint errors and warnings are displayed; informational messages
 * are suppressed.
 */
/*lint -w2 */


/*
 * The macros SQ_TOKEN_TO_PATH and WORK_TOKEN_TO_PATH use a null pointer
 * for computational purposes. Ignore the lint warning.
 */
/*lint -save -e413 */
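/*
 * For reference, these macros are the classic "container_of" pattern.
 * A minimal sketch of what such a macro looks like (the real definitions
 * live in rsmka_path_int.h and may differ in detail):
 *
 *	#define	WORK_TOKEN_TO_PATH(token, index)		\
 *		((path_t *)((char *)(token) -			\
 *		    (char *)(&((path_t *)0)->work_token[index])))
 *
 * Taking the address of a member through a null path_t pointer yields the
 * member's byte offset, which is subtracted from the token address to
 * recover the enclosing path descriptor; lint flags the null-pointer
 * arithmetic as error 413, hence the suppression above.
 */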
/* FUNCTION PROTOTYPES */
static adapter_t *init_adapter(char *, int, rsm_addr_t,
    rsm_controller_handle_t, rsm_ops_t *, srv_handler_arg_t *);
adapter_t *rsmka_lookup_adapter(char *, int);
static ipc_info_t *lookup_ipc_info(rsm_node_id_t);
static ipc_info_t *init_ipc_info(rsm_node_id_t, boolean_t);
static path_t *lookup_path(char *, int, rsm_node_id_t, rsm_addr_t);
static void pathup_to_pathactive(ipc_info_t *, rsm_node_id_t);
static void path_importer_disconnect(path_t *);
boolean_t rsmka_do_path_active(path_t *, int);
static boolean_t do_path_up(path_t *, int);
static void do_path_down(path_t *, int);
static void enqueue_work(work_token_t *);
static boolean_t cancel_work(work_token_t *);
static void link_path(path_t *);
static void destroy_path(path_t *);
static void link_sendq_token(sendq_token_t *, rsm_node_id_t);
static void unlink_sendq_token(sendq_token_t *, rsm_node_id_t);
boolean_t rsmka_check_node_alive(rsm_node_id_t);
static void do_deferred_work(caddr_t);
static int create_ipc_sendq(path_t *);
static void destroy_ipc_info(ipc_info_t *);
void rsmka_pathmanager_cleanup();
void rsmka_release_adapter(adapter_t *);

kt_did_t rsm_thread_id;
int rsmka_terminate_workthread_loop = 0;

static struct adapter_listhead_list adapter_listhead_base;
static work_queue_t work_queue;

/* protect ipc_info descriptor manipulation */
static kmutex_t ipc_info_lock;

static ipc_info_t *ipc_info_head = NULL;

static int category = RSM_PATH_MANAGER | RSM_KERNEL_AGENT;

/* for synchronization with rsmipc_send() in rsm.c */
kmutex_t ipc_info_cvlock;
kcondvar_t ipc_info_cv;



/*
 * RSMKA PATHMANAGER INITIALIZATION AND CLEANUP ROUTINES
 *
 */


/*
 * Called from the rsm module (rsm.c) _init() routine
 */
void
rsmka_pathmanager_init()
{
	kthread_t *tp;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_init enter\n"));

	/* initialization for locks and condition variables */
	mutex_init(&work_queue.work_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ipc_info_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ipc_info_cvlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&adapter_listhead_base.listlock, NULL,
	    MUTEX_DEFAULT, NULL);

	cv_init(&work_queue.work_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&ipc_info_cv, NULL, CV_DEFAULT, NULL);

	tp = thread_create(NULL, 0, do_deferred_work, NULL, 0, &p0,
	    TS_RUN, maxclsyspri);
	rsm_thread_id = tp->t_did;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_init done\n"));
}

void
rsmka_pathmanager_cleanup()
{
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_cleanup enter\n"));

	ASSERT(work_queue.head == NULL);

	/*
	 * In processing the remove path callbacks from the path monitor
	 * object, all deferred work will have been completed. So
	 * awaken the deferred work thread to give it a chance to exit
	 * the loop.
	 */
	mutex_enter(&work_queue.work_mutex);
	rsmka_terminate_workthread_loop++;
	cv_signal(&work_queue.work_cv);
	mutex_exit(&work_queue.work_mutex);

	/*
	 * Wait for the deferred work thread to exit before
	 * destroying the locks and cleaning up other data
	 * structures.
	 */
	if (rsm_thread_id)
		thread_join(rsm_thread_id);

	/*
	 * Destroy locks & condition variables
	 */
	mutex_destroy(&work_queue.work_mutex);
	cv_destroy(&work_queue.work_cv);

	mutex_enter(&ipc_info_lock);
	while (ipc_info_head)
		destroy_ipc_info(ipc_info_head);
	mutex_exit(&ipc_info_lock);

	mutex_destroy(&ipc_info_lock);

	mutex_destroy(&ipc_info_cvlock);
	cv_destroy(&ipc_info_cv);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_pathmanager_cleanup done\n"));

}

void
rsmka_set_my_nodeid(rsm_node_id_t local_nodeid)
{
	my_nodeid = local_nodeid;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsm: node %d \n", my_nodeid));

}

/*
 * DEFERRED WORK THREAD AND WORK QUEUE SUPPORT ROUTINES
 *
 */

/*
 * This function is the executable code of the thread which handles
 * deferred work. Work is deferred when a function is called at
 * clock ipl and processing may require blocking.
 *
 *
 * The thread is created by a call to thread_create in
 * rsmka_pathmanager_init and begins executing this function
 * immediately. It loops forever - blocked until work is enqueued from
 * rsmka_do_path_active, do_path_down, or rsmka_disconnect_node.
 * rsmka_pathmanager_cleanup (called from _fini) will
 * set rsmka_terminate_workthread_loop and the task processing will
 * terminate.
 */
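/*
 * Note: each path descriptor embeds one work token per operation
 * (path->work_token[RSMKA_IPC_UP_INDEX] and
 * path->work_token[RSMKA_IPC_DOWN_INDEX]), which is what lets
 * WORK_TOKEN_TO_PATH() below recover the owning path from a queued token.
 */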
static void
do_deferred_work(caddr_t arg /*ARGSUSED*/)
{

	adapter_t *adapter;
	path_t *path;
	work_token_t *work_token;
	int work_opcode;
	rsm_send_q_handle_t sendq_handle;
	int error;
	timespec_t tv;
	callb_cpr_t cprinfo;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work enter\n"));

	CALLB_CPR_INIT(&cprinfo, &work_queue.work_mutex, callb_generic_cpr,
	    "rsm_deferred_work");

	for (;;) {
		mutex_enter(&work_queue.work_mutex);

		if (rsmka_terminate_workthread_loop) {
			goto exit;
		}

		/* When there is no work to do, block here */
		while (work_queue.head == NULL) {
			/* Since no work to do, Safe to CPR */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&work_queue.work_cv, &work_queue.work_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &work_queue.work_mutex);

			if (rsmka_terminate_workthread_loop) {
				goto exit;
			}
		}

		/*
		 * Remove a work token and begin work
		 */
		work_token = work_queue.head;
		work_queue.head = work_token->next;
		if (work_queue.tail == work_token)
			work_queue.tail = NULL;
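		/*
		 * The opcode doubles as the token's slot in
		 * path->work_token[]: opcode - 1 yields the array index
		 * (see the RSMKA_IPC_UP_INDEX/RSMKA_IPC_DOWN_INDEX uses
		 * in rsmka_remove_path below).
		 */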
		work_opcode = work_token->opcode;
		path = WORK_TOKEN_TO_PATH(work_token, work_opcode - 1);
		work_token->next = NULL;
		mutex_exit(&work_queue.work_mutex);


		switch (work_opcode) {
		case RSMKA_IPC_UP:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:up, path = %lx\n", path));
			error = create_ipc_sendq(path);
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_UP) {
				/*
				 * path state has changed; if the sendq was
				 * created, destroy it and return. Don't need
				 * to worry about the sendq ref_cnt since no
				 * one starts using the sendq till path state
				 * becomes active
				 */
				if (error == RSM_SUCCESS) {
					sendq_handle = path->sendq_token.
					    rsmpi_sendq_handle;
					path->sendq_token.rsmpi_sendq_handle =
					    NULL;
					adapter = path->local_adapter;
					mutex_exit(&path->mutex);

					if (sendq_handle != NULL) {
						adapter->rsmpi_ops->
						    rsm_sendq_destroy(
						    sendq_handle);
					}
					mutex_enter(&path->mutex);
				}
				/* free up work token */
				work_token->opcode = 0;

				/*
				 * decrement reference count for the path
				 * descriptor and signal for synchronization
				 * with rsmka_remove_path. PATH_HOLD_NOLOCK was
				 * done by rsmka_path_up.
				 */
				PATH_RELE_NOLOCK(path);
				mutex_exit(&path->mutex);
				break;
			}

			if (error == RSM_SUCCESS) {
				DBG_PRINTF((category, RSM_DEBUG,
				    "do_deferred_work:success on up\n"));
				/* clear flag since sendq_create succeeded */
				path->flags &= ~RSMKA_SQCREATE_PENDING;
				path->state = RSMKA_PATH_ACTIVE;

				/*
				 * now that path is active we send the
				 * RSMIPC_MSG_SQREADY to the remote endpoint
				 */
				path->procmsg_cnt = 0;
				path->sendq_token.msgbuf_avail = 0;

				/* Calculate local incarnation number */
				gethrestime(&tv);
				if (tv.tv_sec == RSM_UNKNOWN_INCN)
					tv.tv_sec = 1;
				path->local_incn = (int64_t)tv.tv_sec;

				/*
				 * if send fails here it's due to some
				 * non-transient error because QUEUE_FULL is
				 * not possible here since we are the first
				 * message on this sendq. The error will cause
				 * the path to go down anyway, so ignore
				 * the return value.
				 */
				(void) rsmipc_send_controlmsg(path,
				    RSMIPC_MSG_SQREADY);
				/* wait for SQREADY_ACK message */
				path->flags |= RSMKA_WAIT_FOR_SQACK;
			} else {
				/*
				 * sendq create failed, possibly because
				 * the remote end is not yet ready, e.g.
				 * handler not registered; set a flag
				 * so that when there is an indication
				 * that the remote end is ready
				 * rsmka_do_path_active will be retried.
				 */
				path->flags |= RSMKA_SQCREATE_PENDING;
			}

			/* free up work token */
			work_token->opcode = 0;

			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_up.
			 */
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);

			break;
		case RSMKA_IPC_DOWN:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:down, path = %lx\n", path));

			/*
			 * Unlike the processing of path_down in the case
			 * where the RSMKA_NO_SLEEP flag is not set, here,
			 * the state of the path is changed directly to
			 * RSMKA_PATH_DOWN. This is because in this case
			 * where the RSMKA_NO_SLEEP flag is set, any other
			 * calls referring this path will just queue up
			 * and will be processed only after the path
			 * down processing has completed.
			 */
			mutex_enter(&path->mutex);
			path->state = RSMKA_PATH_DOWN;
			/*
			 * clear the WAIT_FOR_SQACK flag since path is down.
			 */
			path->flags &= ~RSMKA_WAIT_FOR_SQACK;

			/*
			 * this wakes up any thread waiting to receive credits
			 * in rsmipc_send to tell it that the path is down,
			 * thus releasing the sendq.
			 */
			cv_broadcast(&path->sendq_token.sendq_cv);

			mutex_exit(&path->mutex);

			/* drain the messages from the receive msgbuf */
			taskq_wait(path->recv_taskq);

			/*
			 * The path_importer_disconnect function has to
			 * be called after releasing the mutex on the path
			 * in order to avoid any recursive mutex enter panics
			 */
			path_importer_disconnect(path);
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: success on down\n"));
			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_down.
			 */
			mutex_enter(&path->mutex);

#ifdef DEBUG
			/*
			 * Some IPC messages left in the recv_buf,
			 * they'll be dropped
			 */
			if (path->msgbuf_cnt != 0)
				cmn_err(CE_NOTE,
				    "path=%lx msgbuf_cnt != 0\n",
				    (uintptr_t)path);
#endif

			/*
			 * Don't want to destroy a send queue when a token
			 * has been acquired; so wait 'til the token is
			 * no longer referenced (with a cv_wait).
			 */
			while (path->sendq_token.ref_cnt != 0)
				cv_wait(&path->sendq_token.sendq_cv,
				    &path->mutex);

			sendq_handle = path->sendq_token.rsmpi_sendq_handle;
			path->sendq_token.rsmpi_sendq_handle = NULL;

			/* destroy the send queue and release the handle */
			if (sendq_handle != NULL) {
				adapter = path->local_adapter;
				adapter->rsmpi_ops->rsm_sendq_destroy(
				    sendq_handle);
			}

			work_token->opcode = 0;
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);
			break;
		default:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: bad work token opcode\n"));
			break;
		}
	}

exit:
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work done\n"));
	/*
	 * CALLB_CPR_EXIT does a mutex_exit for
	 * the work_queue.work_mutex
	 */
	CALLB_CPR_EXIT(&cprinfo);
}

/*
 * Work is inserted at the tail of the list and processed from the
 * head of the list.
 */
static void
enqueue_work(work_token_t *token)
{
	work_token_t *tail_token;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work enter\n"));

	ASSERT(MUTEX_HELD(&work_queue.work_mutex));

	token->next = NULL;
	if (work_queue.head == NULL) {
		work_queue.head = work_queue.tail = token;
	} else {
		tail_token = work_queue.tail;
		work_queue.tail = tail_token->next = token;
	}

	/* wake up deferred work thread */
	cv_signal(&work_queue.work_cv);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work done\n"));
}


/*
 * If the work_token is found on the work queue, the work is cancelled
 * by removing the token from the work queue.
 *
 * Return true if a work_token was found and cancelled, otherwise return
 * false.
 *
 * enqueue_work increments the path refcnt to make sure that the path doesn't
 * go away; callers of cancel_work need to decrement the refcnt of the path to
 * which this work_token belongs if a work_token is found in the work_queue
 * and cancelled, i.e. when the return value is B_TRUE.
 */
static boolean_t
cancel_work(work_token_t *work_token)
{
	work_token_t *current_token;
	work_token_t *prev_token = NULL;
	boolean_t cancelled = B_FALSE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work enter\n"));

	ASSERT(MUTEX_HELD(&work_queue.work_mutex));


	current_token = work_queue.head;
	while (current_token != NULL) {
		if (current_token == work_token) {
			if (work_token == work_queue.head)
				work_queue.head = work_token->next;
			else
				prev_token->next = work_token->next;
			if (work_token == work_queue.tail)
				work_queue.tail = prev_token;

			current_token->opcode = 0;
			current_token->next = NULL;
			/* found and cancelled work */
			cancelled = B_TRUE;
			DBG_PRINTF((category, RSM_DEBUG,
			    "cancelled_work = 0x%p\n", work_token));
			break;
		}
		prev_token = current_token;
		current_token = current_token->next;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work done\n"));
	return (cancelled);
}

/*
 * EXTERNAL INTERFACES
 *
 * For Galileo Clustering, these routines are called from
 * rsmka_pm_interface.cc
 *
 */

/*
 *
 * If the adapter is supported by rsmpi then initialize an adapter descriptor
 * and link it to the list of adapters. The adapter attributes are obtained
 * from rsmpi and stored in the descriptor. Finally, a service handler
 * for incoming ipc on this adapter is registered with rsmpi.
 * A pointer for the adapter descriptor is returned as a cookie to the
 * caller. The cookie may be used with subsequent calls to save the time of
 * adapter descriptor lookup.
 *
 * The adapter descriptor maintains a reference count which is initialized
 * to 1 and incremented on lookups; when a cookie is used in place of
 * a lookup, an explicit ADAPTER_HOLD is required.
 */
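/*
 * A minimal usage sketch of the cookie convention (the caller code below
 * is hypothetical, not part of this module):
 *
 *	void *cookie = rsmka_add_adapter(name, inst, hwaddr);
 *
 *	ADAPTER_HOLD((adapter_t *)cookie);	explicit hold, no lookup
 *	... use the adapter ...
 *	ADAPTER_RELE((adapter_t *)cookie);
 *
 *	(void) rsmka_remove_adapter(name, inst, cookie, RSMKA_USE_COOKIE);
 */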

void *
rsmka_add_adapter(char *name, int instance, rsm_addr_t hwaddr)
{
	adapter_t *adapter;
	rsm_controller_object_t rsmpi_adapter_object;
	rsm_controller_handle_t rsmpi_adapter_handle;
	rsm_ops_t *rsmpi_ops_vector;
	int adapter_is_supported;
	rsm_controller_attr_t *attr;
	srv_handler_arg_t *srv_hdlr_argp;
	int result;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter enter\n"));

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_add_adapter: name = %s instance = %d hwaddr = %llx \n",
	    name, instance, hwaddr));

	/* verify name length */
	if (strlen(name) >= MAXNAMELEN) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_add_adapter done: name too long\n"));
		return (NULL);
	}


	/* Check if rsmpi supports this adapter type */
	adapter_is_supported = rsm_get_controller(name, instance,
	    &rsmpi_adapter_object, RSM_VERSION);

	if (adapter_is_supported != RSM_SUCCESS) {
		DBG_PRINTF((category, RSM_ERR,
		    "rsmka_add_adapter done: adapter not supported\n"));
		return (NULL);
	}

	rsmpi_adapter_handle = rsmpi_adapter_object.handle;
	rsmpi_ops_vector = rsmpi_adapter_object.ops;

	/* Get adapter attributes */
	result = rsm_get_controller_attr(rsmpi_adapter_handle, &attr);
	if (result != RSM_SUCCESS) {
		DBG_PRINTF((category, RSM_ERR,
		    "rsm: get_controller_attr(%d) Failed %x\n",
		    instance, result));
		(void) rsm_release_controller(name, instance,
		    &rsmpi_adapter_object);
		return (NULL);
	}

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_add_adapter: register service offset = %llx\n", hwaddr));

	/*
	 * create a srv_handler_arg_t object, initialize it and register
	 * it along with rsm_srv_func. This gets passed as the
	 * rsm_intr_hand_arg_t when the handler gets invoked.
	 */
	srv_hdlr_argp = kmem_zalloc(sizeof (srv_handler_arg_t), KM_SLEEP);

	(void) strcpy(srv_hdlr_argp->adapter_name, name);
	srv_hdlr_argp->adapter_instance = instance;
	srv_hdlr_argp->adapter_hwaddr = hwaddr;

	/* Have rsmpi register the ipc receive handler for this adapter */
	/*
	 * Currently, we need to pass in a separate service identifier for
	 * each adapter. In order to obtain a unique service identifier
	 * value for an adapter, we add the hardware address of the
	 * adapter to the base service identifier (RSM_SERVICE, which is
	 * defined as RSM_INTR_T_KA as per the RSMPI specification).
	 * NOTE: This may result in using some of the service identifier
	 * values defined for RSM_INTR_T_XPORT (the Sun Cluster Transport).
	 */
	result = rsmpi_ops_vector->rsm_register_handler(
	    rsmpi_adapter_handle, &rsmpi_adapter_object,
	    RSM_SERVICE+(uint_t)hwaddr, rsm_srv_func,
	    (rsm_intr_hand_arg_t)srv_hdlr_argp, NULL, 0);

	if (result != RSM_SUCCESS) {
		DBG_PRINTF((category, RSM_ERR,
		    "rsmka_add_adapter done: rsm_register_handler"
		    " failed %d\n",
		    instance));
		return (NULL);
	}

	/* Initialize an adapter descriptor and add it to the adapter list */
	adapter = init_adapter(name, instance, hwaddr,
	    rsmpi_adapter_handle, rsmpi_ops_vector, srv_hdlr_argp);

	/* Copy over the attributes from the pointer returned to us */
	adapter->rsm_attr = *attr;

	/*
	 * With the addition of the topology obtainment interface, applications
	 * now get the local nodeid from the topology data structure.
	 *
	 * adapter->rsm_attr.attr_node_id = my_nodeid;
	 */
	DBG_PRINTF((category, RSM_ERR,
	    "rsmka_add_adapter: adapter = %lx\n", adapter));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter done\n"));

	/* return adapter pointer as a cookie for later fast access */
	return ((void *)adapter);
}


/*
 * Unlink the adapter descriptor and call rsmka_release_adapter which
 * will decrement the reference count and possibly free the descriptor.
 */
boolean_t
rsmka_remove_adapter(char *name, uint_t instance, void *cookie, int flags)
{
	adapter_t *adapter;
	adapter_listhead_t *listhead;
	adapter_t *prev, *current;
	rsm_controller_object_t rsm_cntl_obj;


	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter enter\n"));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter: cookie = %lx\n", cookie));

	if (flags & RSMKA_USE_COOKIE) {
		adapter = (adapter_t *)cookie;
	} else {
		adapter = rsmka_lookup_adapter(name, instance);
		/*
		 * rsmka_lookup_adapter increments the ref_cnt; need
		 * to decrement here to get true count
		 */
		ADAPTER_RELE(adapter);
	}
	ASSERT(adapter->next_path == NULL);

	listhead = adapter->listhead;

	mutex_enter(&adapter_listhead_base.listlock);

	mutex_enter(&listhead->mutex);

	/* find the adapter in the list and remove it */
	prev = NULL;
	current = listhead->next_adapter;
	while (current != NULL) {
		if (adapter->instance == current->instance) {
			break;
		} else {
			prev = current;
			current = current->next;
		}
	}
	ASSERT(current != NULL);

	if (prev == NULL)
		listhead->next_adapter = current->next;
	else
		prev->next = current->next;

	listhead->adapter_count--;

	mutex_exit(&listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	mutex_enter(&current->mutex);

	/*
	 * unregister the handler
	 */
	current->rsmpi_ops->rsm_unregister_handler(current->rsmpi_handle,
	    RSM_SERVICE+current->hwaddr, rsm_srv_func,
	    (rsm_intr_hand_arg_t)current->hdlr_argp);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_remove_adapter: unreg hdlr "
	    ":adapter=%lx, hwaddr=%lx\n", current, current->hwaddr));

	rsm_cntl_obj.handle = current->rsmpi_handle;
	rsm_cntl_obj.ops = current->rsmpi_ops;

	(void) rsm_release_controller(current->listhead->adapter_devname,
	    current->instance, &rsm_cntl_obj);

	mutex_exit(&current->mutex);

	rsmka_release_adapter(current);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter done\n"));

	return (B_TRUE);
}

/*
 * An adapter descriptor will exist from an earlier add_adapter. This
 * function does:
 *	initialize the path descriptor
 *	initialize the ipc descriptor (it may already exist)
 *	initialize and link a sendq token for this path
 */
void *
rsmka_add_path(char *adapter_name, int adapter_instance,
    rsm_node_id_t remote_node,
    rsm_addr_t remote_hwaddr, int rem_adapt_instance,
    void *cookie, int flags)
{

	path_t *path;
	adapter_t *adapter;
	char tq_name[TASKQ_NAMELEN];

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path enter\n"));

	/* allocate new path descriptor */
	path = kmem_zalloc(sizeof (path_t), KM_SLEEP);

	if (flags & RSMKA_USE_COOKIE) {
		adapter = (adapter_t *)cookie;
		ADAPTER_HOLD(adapter);
	} else {
		adapter = rsmka_lookup_adapter(adapter_name, adapter_instance);
	}

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_add_path: adapter = %lx\n", adapter));

	/*
	 * initialize path descriptor
	 * don't need to increment adapter reference count because
	 * it can't be removed if paths exist for it.
	 */
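	/*
	 * (rsmka_remove_adapter asserts adapter->next_path == NULL, so an
	 * adapter with linked paths cannot be removed out from under us.)
	 */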
	mutex_init(&path->mutex, NULL, MUTEX_DEFAULT, NULL);

	PATH_HOLD(path);
	path->state = RSMKA_PATH_DOWN;
	path->remote_node = remote_node;
	path->remote_hwaddr = remote_hwaddr;
	path->remote_devinst = rem_adapt_instance;
	path->local_adapter = adapter;

	/* taskq is for sendq on adapter with remote_hwaddr on remote_node */
	(void) snprintf(tq_name, sizeof (tq_name), "%x_%llx",
	    remote_node, (unsigned long long) remote_hwaddr);

	path->recv_taskq = taskq_create_instance(tq_name, adapter_instance,
	    RSMKA_ONE_THREAD, maxclsyspri, RSMIPC_MAX_MESSAGES,
	    RSMIPC_MAX_MESSAGES, TASKQ_PREPOPULATE);

	/* allocate the message buffer array */
	path->msgbuf_queue = (msgbuf_elem_t *)kmem_zalloc(
	    RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t), KM_SLEEP);

	/*
	 * init cond variables for synch with rsmipc_send()
	 * and rsmka_remove_path
	 */
	cv_init(&path->sendq_token.sendq_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&path->hold_cv, NULL, CV_DEFAULT, NULL);

	/* link path descriptor on adapter path list */
	link_path(path);

	/* link the path sendq token on the ipc_info token list */
	link_sendq_token(&path->sendq_token, remote_node);

	/* ADAPTER_HOLD done above (explicitly or by rsmka_lookup_adapter) */
	ADAPTER_RELE(adapter);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_add_path: path = %lx\n", path));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path done\n"));
	return ((void *)path);
}

/*
 * Wait for the path descriptor reference count to become zero, then
 * directly call path down processing. Finally, unlink the sendq token and
 * free the path descriptor memory.
 *
 * Note: lookup_path locks the path and increments the path hold count
 */
void
rsmka_remove_path(char *adapter_name, int instance, rsm_node_id_t remote_node,
    rsm_addr_t remote_hwaddr, void *path_cookie, int flags)
{
	path_t *path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path enter\n"));

	if (flags & RSMKA_USE_COOKIE) {
		path = (path_t *)path_cookie;
		mutex_enter(&path->mutex);
	} else {
		path = lookup_path(adapter_name, instance, remote_node,
		    remote_hwaddr);

		/*
		 * remember, lookup_path increments the reference
		 * count - so decrement now so we can get to zero
		 */
		PATH_RELE_NOLOCK(path);
	}

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_remove_path: path = %lx\n", path));

	while (path->state == RSMKA_PATH_GOING_DOWN)
		cv_wait(&path->hold_cv, &path->mutex);

	/* attempt to cancel any possibly pending work */
	mutex_enter(&work_queue.work_mutex);
	if (cancel_work(&path->work_token[RSMKA_IPC_UP_INDEX])) {
		PATH_RELE_NOLOCK(path);
	}
	if (cancel_work(&path->work_token[RSMKA_IPC_DOWN_INDEX])) {
		PATH_RELE_NOLOCK(path);
	}
	mutex_exit(&work_queue.work_mutex);

	/*
	 * The path descriptor ref cnt was set to 1 initially when
	 * the path was added. So we need to do a decrement here to
	 * balance that.
	 */
	PATH_RELE_NOLOCK(path);

	switch (path->state) {
	case RSMKA_PATH_UP:
		/* clear the flag */
		path->flags &= ~RSMKA_SQCREATE_PENDING;
		path->state = RSMKA_PATH_DOWN;
		break;
	case RSMKA_PATH_DOWN:
		break;

	case RSMKA_PATH_ACTIVE:
		/*
		 * rsmka_remove_path should not call do_path_down
		 * with the RSMKA_NO_SLEEP flag set since for
		 * this code path, the deferred work would
		 * incorrectly do a PATH_RELE_NOLOCK.
		 */
		do_path_down(path, 0);
		break;
	default:
		mutex_exit(&path->mutex);
		DBG_PRINTF((category, RSM_ERR,
		    "rsm_remove_path: invalid path state %d\n",
		    path->state));
		return;

	}

	/*
	 * wait for all references to the path to be released. If a thread
	 * was waiting to receive credits do_path_down should wake it up
	 * since the path is going down and that will cause the sleeping
	 * thread to release its hold on the path.
	 */
	while (path->ref_cnt != 0) {
		cv_wait(&path->hold_cv, &path->mutex);
	}

	mutex_exit(&path->mutex);

	/*
	 * remove from ipc token list
	 * NOTE: use the remote_node value from the path structure
	 * since for RSMKA_USE_COOKIE being set, the remote_node
	 * value passed into rsmka_remove_path is 0.
	 */
	unlink_sendq_token(&path->sendq_token, path->remote_node);

	/* unlink from adapter path list and free path descriptor */
	destroy_path(path);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path done\n"));
}

/*
 *
 * LOCKING:
 * lookup_path locks the path and increments the path hold count. If the remote
 * node is not in the alive state, do_path_up will release the lock and
 * decrement the hold count. Otherwise rsmka_do_path_active will release the
 * lock prior to waking up the work thread.
 *
 * REF_CNT:
 * The path descriptor ref_cnt is incremented here; it will be decremented
 * when path up processing is completed in do_path_up or by the work thread
 * if the path up is deferred.
 *
 */
boolean_t
rsmka_path_up(char *adapter_name, uint_t adapter_instance,
    rsm_node_id_t remote_node, rsm_addr_t remote_hwaddr,
    void *path_cookie, int flags)
{

	path_t *path;
	boolean_t rval = B_TRUE;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up enter\n"));

	if (flags & RSMKA_USE_COOKIE) {
		path = (path_t *)path_cookie;
		mutex_enter(&path->mutex);
		PATH_HOLD_NOLOCK(path);
	} else {
		path = lookup_path(adapter_name, adapter_instance,
		    remote_node, remote_hwaddr);
	}

	while (path->state == RSMKA_PATH_GOING_DOWN)
		cv_wait(&path->hold_cv, &path->mutex);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_path_up: path = %lx\n", path));
	rval = do_path_up(path, flags);
	mutex_exit(&path->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up done\n"));
	return (rval);
}

/*
 *
 * LOCKING:
 * lookup_path locks the path and increments the path hold count. If the
 * current state is ACTIVE the path lock is released prior to waking up
 * the work thread in do_path_down. The work thread will decrement the hold
 * count when the work for this is finished.
 *
 *
 * REF_CNT:
 * The path descriptor ref_cnt is incremented here; it will be decremented
 * when path down processing is completed in do_path_down or by the work thread
 * if the path down is deferred.
 *
 */
1102*0Sstevel@tonic-gate boolean_t
1103*0Sstevel@tonic-gate rsmka_path_down(char *adapter_devname, int instance, rsm_node_id_t remote_node,
1104*0Sstevel@tonic-gate rsm_addr_t remote_hwaddr, void *path_cookie, int flags)
1105*0Sstevel@tonic-gate {
1106*0Sstevel@tonic-gate path_t *path;
1107*0Sstevel@tonic-gate boolean_t rval = B_TRUE;
1108*0Sstevel@tonic-gate
1109*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down enter\n"));
1110*0Sstevel@tonic-gate
1111*0Sstevel@tonic-gate if (flags & RSMKA_USE_COOKIE) {
1112*0Sstevel@tonic-gate path = (path_t *)path_cookie;
1113*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1114*0Sstevel@tonic-gate PATH_HOLD_NOLOCK(path);
1115*0Sstevel@tonic-gate } else {
1116*0Sstevel@tonic-gate path = lookup_path(adapter_devname, instance, remote_node,
1117*0Sstevel@tonic-gate remote_hwaddr);
1118*0Sstevel@tonic-gate }
1119*0Sstevel@tonic-gate
1120*0Sstevel@tonic-gate while (path->state == RSMKA_PATH_GOING_DOWN)
1121*0Sstevel@tonic-gate cv_wait(&path->hold_cv, &path->mutex);
1122*0Sstevel@tonic-gate
1123*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1124*0Sstevel@tonic-gate "rsmka_path_down: path = %lx\n", path));
1125*0Sstevel@tonic-gate
1126*0Sstevel@tonic-gate switch (path->state) {
1127*0Sstevel@tonic-gate case RSMKA_PATH_UP:
1128*0Sstevel@tonic-gate /* clear the flag */
1129*0Sstevel@tonic-gate path->flags &= ~RSMKA_SQCREATE_PENDING;
1130*0Sstevel@tonic-gate path->state = RSMKA_PATH_GOING_DOWN;
1131*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1132*0Sstevel@tonic-gate
1133*0Sstevel@tonic-gate /*
1134*0Sstevel@tonic-gate * path->mutex was released above since the enqueued tasks acquire it.
1135*0Sstevel@tonic-gate * Drain all the enqueued tasks.
1136*0Sstevel@tonic-gate */
1137*0Sstevel@tonic-gate taskq_wait(path->recv_taskq);
1138*0Sstevel@tonic-gate
1139*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1140*0Sstevel@tonic-gate path->state = RSMKA_PATH_DOWN;
1141*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1142*0Sstevel@tonic-gate break;
1143*0Sstevel@tonic-gate case RSMKA_PATH_DOWN:
1144*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1145*0Sstevel@tonic-gate break;
1146*0Sstevel@tonic-gate case RSMKA_PATH_ACTIVE:
1147*0Sstevel@tonic-gate do_path_down(path, flags);
1148*0Sstevel@tonic-gate /*
1149*0Sstevel@tonic-gate * Need to release the path refcnt. When RSMKA_NO_SLEEP is set,
1150*0Sstevel@tonic-gate * this is done in do_path_down or in do_deferred_work; otherwise
1151*0Sstevel@tonic-gate * it has to be done here.
1152*0Sstevel@tonic-gate */
1153*0Sstevel@tonic-gate if (!(flags & RSMKA_NO_SLEEP))
1154*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1155*0Sstevel@tonic-gate break;
1156*0Sstevel@tonic-gate default:
1157*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_ERR,
1158*0Sstevel@tonic-gate "rsmka_path_down: invalid path state %d\n", path->state));
1159*0Sstevel@tonic-gate rval = B_FALSE;
1160*0Sstevel@tonic-gate }
1161*0Sstevel@tonic-gate
1162*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1163*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down done\n"));
1164*0Sstevel@tonic-gate return (rval);
1165*0Sstevel@tonic-gate }
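
/*
 * A minimal sketch of the wait protocol both entry points above rely
 * on: any caller that finds the path in RSMKA_PATH_GOING_DOWN blocks
 * on hold_cv until the teardown completes and signals the new state.
 *
 *	mutex_enter(&path->mutex);
 *	while (path->state == RSMKA_PATH_GOING_DOWN)
 *		cv_wait(&path->hold_cv, &path->mutex);
 *	// path->state is now stable (UP, ACTIVE or DOWN) under the mutex
 */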
1166*0Sstevel@tonic-gate
1167*0Sstevel@tonic-gate
1168*0Sstevel@tonic-gate /*
1169*0Sstevel@tonic-gate * Paths cannot become active until node_is_alive is marked true
1170*0Sstevel@tonic-gate * in the ipc_info descriptor for the node
1171*0Sstevel@tonic-gate *
1172*0Sstevel@tonic-gate * In the event this is called before any paths have been added,
1173*0Sstevel@tonic-gate * init_ipc_info is called here.
1174*0Sstevel@tonic-gate *
1175*0Sstevel@tonic-gate */
1176*0Sstevel@tonic-gate boolean_t
1177*0Sstevel@tonic-gate rsmka_node_alive(rsm_node_id_t remote_node)
1178*0Sstevel@tonic-gate {
1179*0Sstevel@tonic-gate ipc_info_t *ipc_info;
1180*0Sstevel@tonic-gate
1181*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive enter\n"));
1182*0Sstevel@tonic-gate
1183*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1184*0Sstevel@tonic-gate "rsmka_node_alive: remote_node = %x\n", remote_node));
1185*0Sstevel@tonic-gate
1186*0Sstevel@tonic-gate ipc_info = lookup_ipc_info(remote_node);
1187*0Sstevel@tonic-gate
1188*0Sstevel@tonic-gate if (ipc_info == NULL) {
1189*0Sstevel@tonic-gate ipc_info = init_ipc_info(remote_node, B_TRUE);
1190*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1191*0Sstevel@tonic-gate "rsmka_node_alive: new ipc_info = %lx\n", ipc_info));
1192*0Sstevel@tonic-gate } else {
1193*0Sstevel@tonic-gate ASSERT(ipc_info->node_is_alive == B_FALSE);
1194*0Sstevel@tonic-gate ipc_info->node_is_alive = B_TRUE;
1195*0Sstevel@tonic-gate }
1196*0Sstevel@tonic-gate
1197*0Sstevel@tonic-gate pathup_to_pathactive(ipc_info, remote_node);
1198*0Sstevel@tonic-gate
1199*0Sstevel@tonic-gate mutex_exit(&ipc_info_lock);
1200*0Sstevel@tonic-gate
1201*0Sstevel@tonic-gate /* rsmipc_send() may be waiting for a sendq_token */
1202*0Sstevel@tonic-gate mutex_enter(&ipc_info_cvlock);
1203*0Sstevel@tonic-gate cv_broadcast(&ipc_info_cv);
1204*0Sstevel@tonic-gate mutex_exit(&ipc_info_cvlock);
1205*0Sstevel@tonic-gate
1206*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive done\n"));
1207*0Sstevel@tonic-gate
1208*0Sstevel@tonic-gate return (B_TRUE);
1209*0Sstevel@tonic-gate }
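
/*
 * Hedged sketch of the consumer side of the cv_broadcast above: a
 * sender that cannot find a usable sendq_token waits on ipc_info_cv
 * and retries when node state changes. The helper try_get_token() is
 * hypothetical; the real retry logic lives in rsmipc_send.
 *
 *	mutex_enter(&ipc_info_cvlock);
 *	while (try_get_token(remote_node) == NULL)
 *		cv_wait(&ipc_info_cv, &ipc_info_cvlock);
 *	mutex_exit(&ipc_info_cvlock);
 */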
1210*0Sstevel@tonic-gate
1211*0Sstevel@tonic-gate
1212*0Sstevel@tonic-gate
1213*0Sstevel@tonic-gate /*
1214*0Sstevel@tonic-gate * Paths cannot become active when node_is_alive is marked false
1215*0Sstevel@tonic-gate * in the ipc_info descriptor for the node
1216*0Sstevel@tonic-gate */
1217*0Sstevel@tonic-gate boolean_t
1218*0Sstevel@tonic-gate rsmka_node_died(rsm_node_id_t remote_node)
1219*0Sstevel@tonic-gate {
1220*0Sstevel@tonic-gate ipc_info_t *ipc_info;
1221*0Sstevel@tonic-gate
1222*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_died enter\n"));
1223*0Sstevel@tonic-gate
1224*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1225*0Sstevel@tonic-gate "rsmka_node_died: remote_node = %x\n", remote_node));
1226*0Sstevel@tonic-gate
1227*0Sstevel@tonic-gate ipc_info = lookup_ipc_info(remote_node);
1228*0Sstevel@tonic-gate if (ipc_info == NULL)
1229*0Sstevel@tonic-gate return (B_FALSE);
1230*0Sstevel@tonic-gate
1231*0Sstevel@tonic-gate ASSERT(ipc_info->node_is_alive == B_TRUE);
1232*0Sstevel@tonic-gate ipc_info->node_is_alive = B_FALSE;
1233*0Sstevel@tonic-gate
1234*0Sstevel@tonic-gate rsm_suspend_complete(remote_node, RSM_SUSPEND_NODEDEAD);
1235*0Sstevel@tonic-gate
1236*0Sstevel@tonic-gate mutex_exit(&ipc_info_lock);
1237*0Sstevel@tonic-gate
1238*0Sstevel@tonic-gate /* rsmipc_send() may be waiting for a sendq_token */
1239*0Sstevel@tonic-gate mutex_enter(&ipc_info_cvlock);
1240*0Sstevel@tonic-gate cv_broadcast(&ipc_info_cv);
1241*0Sstevel@tonic-gate mutex_exit(&ipc_info_cvlock);
1242*0Sstevel@tonic-gate
1243*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_died done\n"));
1244*0Sstevel@tonic-gate
1245*0Sstevel@tonic-gate return (B_TRUE);
1246*0Sstevel@tonic-gate }
1247*0Sstevel@tonic-gate
1248*0Sstevel@tonic-gate /*
1249*0Sstevel@tonic-gate * Treat like path_down for all paths for the specified remote node.
1250*0Sstevel@tonic-gate * Always invoked before node died.
1251*0Sstevel@tonic-gate *
1252*0Sstevel@tonic-gate * NOTE: This routine is not called from the cluster path interface; the
1253*0Sstevel@tonic-gate * rsmka_path_down is called directly for each path.
1254*0Sstevel@tonic-gate */
1255*0Sstevel@tonic-gate void
1256*0Sstevel@tonic-gate rsmka_disconnect_node(rsm_node_id_t remote_node, int flags)
1257*0Sstevel@tonic-gate {
1258*0Sstevel@tonic-gate ipc_info_t *ipc_info;
1259*0Sstevel@tonic-gate path_t *path;
1260*0Sstevel@tonic-gate sendq_token_t *sendq_token;
1261*0Sstevel@tonic-gate work_token_t *up_token;
1262*0Sstevel@tonic-gate work_token_t *down_token;
1263*0Sstevel@tonic-gate boolean_t do_work = B_FALSE;
1264*0Sstevel@tonic-gate boolean_t cancelled = B_FALSE;
1265*0Sstevel@tonic-gate
1266*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1267*0Sstevel@tonic-gate "rsmka_disconnect_node enter\n"));
1268*0Sstevel@tonic-gate
1269*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1270*0Sstevel@tonic-gate "rsmka_disconnect_node: node = %d\n", remote_node));
1271*0Sstevel@tonic-gate
1272*0Sstevel@tonic-gate if (flags & RSMKA_NO_SLEEP) {
1273*0Sstevel@tonic-gate ipc_info = lookup_ipc_info(remote_node);
1274*0Sstevel@tonic-gate
1275*0Sstevel@tonic-gate sendq_token = ipc_info->token_list;
1276*0Sstevel@tonic-gate
1277*0Sstevel@tonic-gate while (sendq_token != NULL) {
1278*0Sstevel@tonic-gate path = SQ_TOKEN_TO_PATH(sendq_token);
1279*0Sstevel@tonic-gate PATH_HOLD(path);
1280*0Sstevel@tonic-gate up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
1281*0Sstevel@tonic-gate down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
1282*0Sstevel@tonic-gate
1283*0Sstevel@tonic-gate mutex_enter(&work_queue.work_mutex);
1284*0Sstevel@tonic-gate
1285*0Sstevel@tonic-gate /* if an up token is enqueued, remove it */
1286*0Sstevel@tonic-gate cancelled = cancel_work(up_token);
1287*0Sstevel@tonic-gate
1288*0Sstevel@tonic-gate /*
1289*0Sstevel@tonic-gate * If the path is active and down work hasn't
1290*0Sstevel@tonic-gate * already been setup then down work is needed.
1291*0Sstevel@tonic-gate * else
1292*0Sstevel@tonic-gate * if up work wasn't canceled because it was
1293*0Sstevel@tonic-gate * already being processed then down work is needed
1294*0Sstevel@tonic-gate */
1295*0Sstevel@tonic-gate if (path->state == RSMKA_PATH_ACTIVE) {
1296*0Sstevel@tonic-gate if (down_token->opcode == 0)
1297*0Sstevel@tonic-gate do_work = B_TRUE;
1298*0Sstevel@tonic-gate } else
1299*0Sstevel@tonic-gate if (up_token->opcode == RSMKA_IPC_UP)
1300*0Sstevel@tonic-gate do_work = B_TRUE;
1301*0Sstevel@tonic-gate
1302*0Sstevel@tonic-gate if (do_work == B_TRUE) {
1303*0Sstevel@tonic-gate down_token->opcode = RSMKA_IPC_DOWN;
1304*0Sstevel@tonic-gate enqueue_work(down_token);
1305*0Sstevel@tonic-gate }
1306*0Sstevel@tonic-gate mutex_exit(&work_queue.work_mutex);
1307*0Sstevel@tonic-gate
1308*0Sstevel@tonic-gate if (do_work == B_FALSE)
1309*0Sstevel@tonic-gate PATH_RELE(path);
1310*0Sstevel@tonic-gate
1311*0Sstevel@tonic-gate if (cancelled) {
1312*0Sstevel@tonic-gate PATH_RELE(path);
1313*0Sstevel@tonic-gate }
1314*0Sstevel@tonic-gate sendq_token = sendq_token->next;
1315*0Sstevel@tonic-gate }
1316*0Sstevel@tonic-gate
1317*0Sstevel@tonic-gate /*
1318*0Sstevel@tonic-gate * Now that all the work is enqueued, wakeup the work
1319*0Sstevel@tonic-gate * thread.
1320*0Sstevel@tonic-gate */
1321*0Sstevel@tonic-gate mutex_enter(&work_queue.work_mutex);
1322*0Sstevel@tonic-gate cv_signal(&work_queue.work_cv);
1323*0Sstevel@tonic-gate mutex_exit(&work_queue.work_mutex);
1324*0Sstevel@tonic-gate
1325*0Sstevel@tonic-gate IPCINFO_RELE_NOLOCK(ipc_info);
1326*0Sstevel@tonic-gate mutex_exit(&ipc_info_lock);
1327*0Sstevel@tonic-gate
1328*0Sstevel@tonic-gate } else {
1329*0Sstevel@tonic-gate /* get locked ipc_info descriptor */
1330*0Sstevel@tonic-gate ipc_info = lookup_ipc_info(remote_node);
1331*0Sstevel@tonic-gate
1332*0Sstevel@tonic-gate sendq_token = ipc_info->token_list;
1333*0Sstevel@tonic-gate while (sendq_token != NULL) {
1334*0Sstevel@tonic-gate path = SQ_TOKEN_TO_PATH(sendq_token);
1335*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1336*0Sstevel@tonic-gate "rsmka_disconnect_node: path_down "
1337*0Sstevel@tonic-gate "for path = %lx\n",
1338*0Sstevel@tonic-gate path));
1339*0Sstevel@tonic-gate (void) rsmka_path_down(0, 0, 0, 0,
1340*0Sstevel@tonic-gate path, RSMKA_USE_COOKIE);
1341*0Sstevel@tonic-gate sendq_token = sendq_token->next;
1342*0Sstevel@tonic-gate if (sendq_token == ipc_info->token_list)
1343*0Sstevel@tonic-gate break;
1344*0Sstevel@tonic-gate }
1345*0Sstevel@tonic-gate mutex_exit(&ipc_info_lock);
1346*0Sstevel@tonic-gate }
1347*0Sstevel@tonic-gate
1348*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1349*0Sstevel@tonic-gate "rsmka_disconnect_node done\n"));
1350*0Sstevel@tonic-gate }
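
/*
 * Condensed sketch of the deferred-work decision used in the
 * RSMKA_NO_SLEEP arm above (the same test reappears in do_path_down):
 * a down token is enqueued only if the path is ACTIVE with no down
 * work pending, or if an up token could not be cancelled because it
 * was already being processed.
 *
 *	cancelled = cancel_work(up_token);
 *	if (path->state == RSMKA_PATH_ACTIVE)
 *		do_work = (down_token->opcode == 0);
 *	else
 *		do_work = (up_token->opcode == RSMKA_IPC_UP);
 *	if (do_work) {
 *		down_token->opcode = RSMKA_IPC_DOWN;
 *		enqueue_work(down_token);
 *	}
 */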
1351*0Sstevel@tonic-gate
1352*0Sstevel@tonic-gate
1353*0Sstevel@tonic-gate /*
1354*0Sstevel@tonic-gate * Called from rsmka_node_alive - if a path to a remote node is in
1355*0Sstevel@tonic-gate * state RSMKA_PATH_UP, transition the state to RSMKA_PATH_ACTIVE with a
1356*0Sstevel@tonic-gate * call to rsmka_do_path_active.
1357*0Sstevel@tonic-gate *
1358*0Sstevel@tonic-gate * REF_CNT:
1359*0Sstevel@tonic-gate * The path descriptor ref_cnt is incremented here; it will be decremented
1360*0Sstevel@tonic-gate * when path up processing is completed in rsmka_do_path_active or by the work
1361*0Sstevel@tonic-gate * thread if the path up is deferred.
1362*0Sstevel@tonic-gate */
1363*0Sstevel@tonic-gate static void
1364*0Sstevel@tonic-gate pathup_to_pathactive(ipc_info_t *ipc_info, rsm_node_id_t remote_node)
1365*0Sstevel@tonic-gate {
1366*0Sstevel@tonic-gate path_t *path;
1367*0Sstevel@tonic-gate sendq_token_t *token;
1368*0Sstevel@tonic-gate
1369*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1370*0Sstevel@tonic-gate "pathup_to_pathactive enter\n"));
1371*0Sstevel@tonic-gate
1372*0Sstevel@tonic-gate remote_node = remote_node;	/* self-assignment quiets unused-arg lint */
1373*0Sstevel@tonic-gate
1374*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&ipc_info_lock));
1375*0Sstevel@tonic-gate
1376*0Sstevel@tonic-gate token = ipc_info->token_list;
1377*0Sstevel@tonic-gate while (token != NULL) {
1378*0Sstevel@tonic-gate path = SQ_TOKEN_TO_PATH(token);
1379*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1380*0Sstevel@tonic-gate if (path->state == RSMKA_PATH_UP) {
1381*0Sstevel@tonic-gate PATH_HOLD_NOLOCK(path);
1382*0Sstevel@tonic-gate (void) rsmka_do_path_active(path, 0);
1383*0Sstevel@tonic-gate }
1384*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1385*0Sstevel@tonic-gate token = token->next;
1386*0Sstevel@tonic-gate if (token == ipc_info->token_list)
1387*0Sstevel@tonic-gate break;
1388*0Sstevel@tonic-gate }
1389*0Sstevel@tonic-gate
1390*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1391*0Sstevel@tonic-gate "pathup_to_pathactive done\n"));
1392*0Sstevel@tonic-gate }
1393*0Sstevel@tonic-gate
1394*0Sstevel@tonic-gate /*
1395*0Sstevel@tonic-gate * Called from pathup_to_pathactive and do_path_up. The objective is to
1396*0Sstevel@tonic-gate * create an ipc send queue and transition to state RSMKA_PATH_ACTIVE.
1397*0Sstevel@tonic-gate * For the no sleep case we may need to defer the work using a token.
1398*0Sstevel@tonic-gate *
1399*0Sstevel@tonic-gate */
1400*0Sstevel@tonic-gate boolean_t
1401*0Sstevel@tonic-gate rsmka_do_path_active(path_t *path, int flags)
1402*0Sstevel@tonic-gate {
1403*0Sstevel@tonic-gate work_token_t *up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
1404*0Sstevel@tonic-gate work_token_t *down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
1405*0Sstevel@tonic-gate boolean_t do_work = B_FALSE;
1406*0Sstevel@tonic-gate int error;
1407*0Sstevel@tonic-gate timespec_t tv;
1408*0Sstevel@tonic-gate adapter_t *adapter;
1409*0Sstevel@tonic-gate rsm_send_q_handle_t sqhdl;
1410*0Sstevel@tonic-gate
1411*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1412*0Sstevel@tonic-gate "rsmka_do_path_active enter\n"));
1413*0Sstevel@tonic-gate
1414*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&path->mutex));
1415*0Sstevel@tonic-gate
1416*0Sstevel@tonic-gate if (flags & RSMKA_NO_SLEEP) {
1417*0Sstevel@tonic-gate mutex_enter(&work_queue.work_mutex);
1418*0Sstevel@tonic-gate
1419*0Sstevel@tonic-gate /* if a down token is enqueued, remove it */
1420*0Sstevel@tonic-gate if (cancel_work(down_token)) {
1421*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1422*0Sstevel@tonic-gate }
1423*0Sstevel@tonic-gate
1424*0Sstevel@tonic-gate /*
1425*0Sstevel@tonic-gate * If the path is not active and up work hasn't
1426*0Sstevel@tonic-gate * already been setup then up work is needed.
1427*0Sstevel@tonic-gate * else
1428*0Sstevel@tonic-gate * if down work wasn't canceled because it was
1429*0Sstevel@tonic-gate * already being processed then up work is needed
1430*0Sstevel@tonic-gate */
1431*0Sstevel@tonic-gate if (path->state != RSMKA_PATH_ACTIVE) {
1432*0Sstevel@tonic-gate if (up_token->opcode == 0)
1433*0Sstevel@tonic-gate do_work = B_TRUE;
1434*0Sstevel@tonic-gate } else
1435*0Sstevel@tonic-gate if (down_token->opcode == RSMKA_IPC_DOWN)
1436*0Sstevel@tonic-gate do_work = B_TRUE;
1437*0Sstevel@tonic-gate
1438*0Sstevel@tonic-gate if (do_work == B_TRUE) {
1439*0Sstevel@tonic-gate up_token->opcode = RSMKA_IPC_UP;
1440*0Sstevel@tonic-gate enqueue_work(up_token);
1441*0Sstevel@tonic-gate } else
1443*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1444*0Sstevel@tonic-gate
1445*0Sstevel@tonic-gate mutex_exit(&work_queue.work_mutex);
1446*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1447*0Sstevel@tonic-gate "rsmka_do_path_active done\n"));
1448*0Sstevel@tonic-gate return (B_TRUE);
1449*0Sstevel@tonic-gate } else {
1450*0Sstevel@tonic-gate /*
1451*0Sstevel@tonic-gate * Drop the path lock before calling create_ipc_sendq, shouldn't
1452*0Sstevel@tonic-gate * hold locks across calls to RSMPI routines.
1453*0Sstevel@tonic-gate */
1454*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1455*0Sstevel@tonic-gate
1456*0Sstevel@tonic-gate error = create_ipc_sendq(path);
1457*0Sstevel@tonic-gate
1458*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1459*0Sstevel@tonic-gate if (path->state != RSMKA_PATH_UP) {
1460*0Sstevel@tonic-gate /*
1461*0Sstevel@tonic-gate * path state has changed, if sendq was created,
1462*0Sstevel@tonic-gate * destroy it and return
1463*0Sstevel@tonic-gate */
1464*0Sstevel@tonic-gate if (error == RSM_SUCCESS) {
1465*0Sstevel@tonic-gate sqhdl = path->sendq_token.rsmpi_sendq_handle;
1466*0Sstevel@tonic-gate path->sendq_token.rsmpi_sendq_handle = NULL;
1467*0Sstevel@tonic-gate adapter = path->local_adapter;
1468*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1469*0Sstevel@tonic-gate
1470*0Sstevel@tonic-gate if (sqhdl != NULL) {
1471*0Sstevel@tonic-gate adapter->rsmpi_ops->rsm_sendq_destroy(
1472*0Sstevel@tonic-gate sqhdl);
1473*0Sstevel@tonic-gate }
1474*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1475*0Sstevel@tonic-gate }
1476*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1477*0Sstevel@tonic-gate
1478*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1479*0Sstevel@tonic-gate "rsmka_do_path_active done: path=%lx not UP\n",
1480*0Sstevel@tonic-gate (uintptr_t)path));
1481*0Sstevel@tonic-gate return (error ? B_FALSE : B_TRUE);
1482*0Sstevel@tonic-gate }
1483*0Sstevel@tonic-gate
1484*0Sstevel@tonic-gate if (error == RSM_SUCCESS) {
1485*0Sstevel@tonic-gate /* clear flag since sendq_create succeeded */
1486*0Sstevel@tonic-gate path->flags &= ~RSMKA_SQCREATE_PENDING;
1487*0Sstevel@tonic-gate path->state = RSMKA_PATH_ACTIVE;
1488*0Sstevel@tonic-gate /*
1489*0Sstevel@tonic-gate * now that path is active we send the
1490*0Sstevel@tonic-gate * RSMIPC_MSG_SQREADY to the remote endpoint
1491*0Sstevel@tonic-gate */
1492*0Sstevel@tonic-gate path->procmsg_cnt = 0;
1493*0Sstevel@tonic-gate path->sendq_token.msgbuf_avail = 0;
1494*0Sstevel@tonic-gate
1495*0Sstevel@tonic-gate /* Calculate local incarnation number */
1496*0Sstevel@tonic-gate gethrestime(&tv);
1497*0Sstevel@tonic-gate if (tv.tv_sec == RSM_UNKNOWN_INCN)
1498*0Sstevel@tonic-gate tv.tv_sec = 1;
1499*0Sstevel@tonic-gate path->local_incn = (int64_t)tv.tv_sec;
1500*0Sstevel@tonic-gate
1501*0Sstevel@tonic-gate /*
1502*0Sstevel@tonic-gate * if the send fails here it's due to some non-transient
1503*0Sstevel@tonic-gate * error, because QUEUE_FULL is not possible since
1504*0Sstevel@tonic-gate * we are the first message on this sendq. The error
1505*0Sstevel@tonic-gate * will cause the path to go down anyway, so ignore
1506*0Sstevel@tonic-gate * the return value.
1507*0Sstevel@tonic-gate */
1508*0Sstevel@tonic-gate (void) rsmipc_send_controlmsg(path, RSMIPC_MSG_SQREADY);
1509*0Sstevel@tonic-gate /* wait for SQREADY_ACK message */
1510*0Sstevel@tonic-gate path->flags |= RSMKA_WAIT_FOR_SQACK;
1511*0Sstevel@tonic-gate
1512*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1513*0Sstevel@tonic-gate "rsmka_do_path_active success\n"));
1514*0Sstevel@tonic-gate } else {
1515*0Sstevel@tonic-gate /*
1516*0Sstevel@tonic-gate * The sendq create failed, possibly because
1517*0Sstevel@tonic-gate * the remote end is not yet ready, e.g. its
1518*0Sstevel@tonic-gate * handler is not yet registered. Set a flag
1519*0Sstevel@tonic-gate * so that rsmka_do_path_active will be retried
1520*0Sstevel@tonic-gate * when there is an indication that the remote
1521*0Sstevel@tonic-gate * end is ready.
1522*0Sstevel@tonic-gate */
1523*0Sstevel@tonic-gate path->flags |= RSMKA_SQCREATE_PENDING;
1524*0Sstevel@tonic-gate }
1525*0Sstevel@tonic-gate
1526*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1527*0Sstevel@tonic-gate
1528*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1529*0Sstevel@tonic-gate "rsmka_do_path_active done\n"));
1530*0Sstevel@tonic-gate return (error ? B_FALSE : B_TRUE);
1531*0Sstevel@tonic-gate }
1532*0Sstevel@tonic-gate
1533*0Sstevel@tonic-gate }
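
/*
 * The drop/revalidate pattern above around create_ipc_sendq is the
 * general rule for calling RSMPI entry points: never hold path->mutex
 * across the call, and recheck the path state afterwards. A minimal
 * sketch, with rsmpi_call() standing in for any RSMPI routine
 * (hypothetical name):
 *
 *	mutex_exit(&path->mutex);
 *	error = rsmpi_call(...);	// may block; no locks held
 *	mutex_enter(&path->mutex);
 *	if (path->state != RSMKA_PATH_UP) {
 *		// state changed while unlocked: undo any side effects
 *		// of rsmpi_call and bail out
 *	}
 */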
1534*0Sstevel@tonic-gate
1535*0Sstevel@tonic-gate /*
1536*0Sstevel@tonic-gate * Called from rsmka_path_up.
1537*0Sstevel@tonic-gate * If the remote node state is "alive" then call rsmka_do_path_active
1538*0Sstevel@tonic-gate * otherwise just transition path state to RSMKA_PATH_UP.
1539*0Sstevel@tonic-gate */
1540*0Sstevel@tonic-gate static boolean_t
1541*0Sstevel@tonic-gate do_path_up(path_t *path, int flags)
1542*0Sstevel@tonic-gate {
1543*0Sstevel@tonic-gate boolean_t rval;
1544*0Sstevel@tonic-gate boolean_t node_alive;
1545*0Sstevel@tonic-gate
1546*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_up enter\n"));
1547*0Sstevel@tonic-gate
1548*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&path->mutex));
1549*0Sstevel@tonic-gate
1550*0Sstevel@tonic-gate /* path moved to ACTIVE by rsm_sqcreateop_callback - just return */
1551*0Sstevel@tonic-gate if (path->state == RSMKA_PATH_ACTIVE) {
1552*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1553*0Sstevel@tonic-gate "do_path_up done: already ACTIVE\n"));
1554*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1555*0Sstevel@tonic-gate return (B_TRUE);
1556*0Sstevel@tonic-gate }
1557*0Sstevel@tonic-gate
1558*0Sstevel@tonic-gate path->state = RSMKA_PATH_UP;
1559*0Sstevel@tonic-gate
1560*0Sstevel@tonic-gate /* initialize the receive msgbuf counters */
1561*0Sstevel@tonic-gate path->msgbuf_head = 0;
1562*0Sstevel@tonic-gate path->msgbuf_tail = RSMIPC_MAX_MESSAGES - 1;
1563*0Sstevel@tonic-gate path->msgbuf_cnt = 0;
1564*0Sstevel@tonic-gate path->procmsg_cnt = 0;
1565*0Sstevel@tonic-gate /*
1566*0Sstevel@tonic-gate * rsmka_check_node_alive acquires ipc_info_lock, in order to maintain
1567*0Sstevel@tonic-gate * correct lock ordering drop the path lock before calling it.
1568*0Sstevel@tonic-gate */
1569*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1570*0Sstevel@tonic-gate
1571*0Sstevel@tonic-gate node_alive = rsmka_check_node_alive(path->remote_node);
1572*0Sstevel@tonic-gate
1573*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1574*0Sstevel@tonic-gate if (node_alive == B_TRUE)
1575*0Sstevel@tonic-gate rval = rsmka_do_path_active(path, flags);
1576*0Sstevel@tonic-gate else {
1577*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1578*0Sstevel@tonic-gate rval = B_TRUE;
1579*0Sstevel@tonic-gate }
1580*0Sstevel@tonic-gate
1581*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_up done\n"));
1582*0Sstevel@tonic-gate return (rval);
1583*0Sstevel@tonic-gate }
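
/*
 * Lock-ordering note for the drop/reacquire in do_path_up: since
 * ipc_info_lock is taken before path->mutex elsewhere (e.g. in
 * pathup_to_pathactive), the path lock must be dropped before calling
 * rsmka_check_node_alive. A compressed sketch of that ordering:
 *
 *	mutex_exit(&path->mutex);		// give up the lower lock
 *	alive = rsmka_check_node_alive(node);	// takes ipc_info_lock
 *	mutex_enter(&path->mutex);		// reacquire and recheck
 */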
1584*0Sstevel@tonic-gate
1585*0Sstevel@tonic-gate
1586*0Sstevel@tonic-gate
1587*0Sstevel@tonic-gate /*
1588*0Sstevel@tonic-gate * Called from rsmka_remove_path, rsmka_path_down and do_deferred_work.
1589*0Sstevel@tonic-gate * Destroy the send queue on this path.
1590*0Sstevel@tonic-gate * Disconnect segments being imported from the remote node
1591*0Sstevel@tonic-gate * Disconnect segments being imported by the remote node
1592*0Sstevel@tonic-gate *
1593*0Sstevel@tonic-gate */
1594*0Sstevel@tonic-gate static void
1595*0Sstevel@tonic-gate do_path_down(path_t *path, int flags)
1596*0Sstevel@tonic-gate {
1597*0Sstevel@tonic-gate work_token_t *up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
1598*0Sstevel@tonic-gate work_token_t *down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
1599*0Sstevel@tonic-gate boolean_t do_work = B_FALSE;
1600*0Sstevel@tonic-gate
1601*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_down enter\n"));
1602*0Sstevel@tonic-gate
1603*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&path->mutex));
1604*0Sstevel@tonic-gate
1605*0Sstevel@tonic-gate if (flags & RSMKA_NO_SLEEP) {
1606*0Sstevel@tonic-gate mutex_enter(&work_queue.work_mutex);
1607*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG,
1608*0Sstevel@tonic-gate "do_path_down: after work_mutex\n"));
1609*0Sstevel@tonic-gate
1610*0Sstevel@tonic-gate /* if an up token is enqueued, remove it */
1611*0Sstevel@tonic-gate if (cancel_work(up_token)) {
1612*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1613*0Sstevel@tonic-gate }
1614*0Sstevel@tonic-gate
1615*0Sstevel@tonic-gate /*
1616*0Sstevel@tonic-gate * If the path is active and down work hasn't
1617*0Sstevel@tonic-gate * already been setup then down work is needed.
1618*0Sstevel@tonic-gate * else
1619*0Sstevel@tonic-gate * if up work wasn't canceled because it was
1620*0Sstevel@tonic-gate * already being processed then down work is needed
1621*0Sstevel@tonic-gate */
1622*0Sstevel@tonic-gate if (path->state == RSMKA_PATH_ACTIVE) {
1623*0Sstevel@tonic-gate if (down_token->opcode == 0)
1624*0Sstevel@tonic-gate do_work = B_TRUE;
1625*0Sstevel@tonic-gate } else
1626*0Sstevel@tonic-gate if (up_token->opcode == RSMKA_IPC_UP)
1627*0Sstevel@tonic-gate do_work = B_TRUE;
1628*0Sstevel@tonic-gate
1629*0Sstevel@tonic-gate if (do_work == B_TRUE) {
1630*0Sstevel@tonic-gate down_token->opcode = RSMKA_IPC_DOWN;
1631*0Sstevel@tonic-gate enqueue_work(down_token);
1632*0Sstevel@tonic-gate } else
1633*0Sstevel@tonic-gate PATH_RELE_NOLOCK(path);
1634*0Sstevel@tonic-gate
1635*0Sstevel@tonic-gate
1636*0Sstevel@tonic-gate mutex_exit(&work_queue.work_mutex);
1637*0Sstevel@tonic-gate
1638*0Sstevel@tonic-gate } else {
1639*0Sstevel@tonic-gate
1640*0Sstevel@tonic-gate /*
1641*0Sstevel@tonic-gate * Change the state of the path to RSMKA_PATH_GOING_DOWN and
1642*0Sstevel@tonic-gate * release the path mutex. Any other thread referring to
1643*0Sstevel@tonic-gate * this path will cv_wait while the state of the path
1644*0Sstevel@tonic-gate * is RSMKA_PATH_GOING_DOWN.
1645*0Sstevel@tonic-gate * On completing the path down processing, change the
1646*0Sstevel@tonic-gate * state to RSMKA_PATH_DOWN, indicating that the path
1647*0Sstevel@tonic-gate * is indeed down.
1648*0Sstevel@tonic-gate */
1649*0Sstevel@tonic-gate path->state = RSMKA_PATH_GOING_DOWN;
1650*0Sstevel@tonic-gate
1651*0Sstevel@tonic-gate /*
1652*0Sstevel@tonic-gate * clear the WAIT_FOR_SQACK flag since path is going down.
1653*0Sstevel@tonic-gate */
1654*0Sstevel@tonic-gate path->flags &= ~RSMKA_WAIT_FOR_SQACK;
1655*0Sstevel@tonic-gate
1656*0Sstevel@tonic-gate /*
1657*0Sstevel@tonic-gate * this wakes up any thread waiting to receive credits
1658*0Sstevel@tonic-gate * in rsmipc_send to tell it that the path is going down
1659*0Sstevel@tonic-gate */
1660*0Sstevel@tonic-gate cv_broadcast(&path->sendq_token.sendq_cv);
1661*0Sstevel@tonic-gate
1662*0Sstevel@tonic-gate mutex_exit(&path->mutex);
1663*0Sstevel@tonic-gate
1664*0Sstevel@tonic-gate /*
1665*0Sstevel@tonic-gate * drain the messages from the receive msgbuf, the
1666*0Sstevel@tonic-gate * tasks in the taskq_thread acquire the path->mutex
1667*0Sstevel@tonic-gate * so we drop the path mutex before taskq_wait.
1668*0Sstevel@tonic-gate */
1669*0Sstevel@tonic-gate taskq_wait(path->recv_taskq);
1670*0Sstevel@tonic-gate
1671*0Sstevel@tonic-gate /*
1672*0Sstevel@tonic-gate * Disconnect segments being imported from the remote node
1673*0Sstevel@tonic-gate * The path_importer_disconnect function needs to be called
1674*0Sstevel@tonic-gate * only after releasing the mutex on the path. This is to
1675*0Sstevel@tonic-gate * avoid a recursive mutex enter when doing the
1676*0Sstevel@tonic-gate * rsmka_get_sendq_token.
1677*0Sstevel@tonic-gate */
1678*0Sstevel@tonic-gate path_importer_disconnect(path);
1679*0Sstevel@tonic-gate
1680*0Sstevel@tonic-gate /*
1681*0Sstevel@tonic-gate * Get the path mutex, change the state of the path to
1682*0Sstevel@tonic-gate * RSMKA_PATH_DOWN since the path down processing has
1683*0Sstevel@tonic-gate * completed and cv_signal anyone who was waiting since
1684*0Sstevel@tonic-gate * the state was RSMKA_PATH_GOING_DOWN.
1685*0Sstevel@tonic-gate * NOTE: Do not do a mutex_exit here. We entered this
1686*0Sstevel@tonic-gate * routine with the path lock held by the caller. The
1687*0Sstevel@tonic-gate * caller eventually releases the path lock by doing a
1688*0Sstevel@tonic-gate * mutex_exit.
1689*0Sstevel@tonic-gate */
1690*0Sstevel@tonic-gate mutex_enter(&path->mutex);
1691*0Sstevel@tonic-gate
1692*0Sstevel@tonic-gate #ifdef DEBUG
1693*0Sstevel@tonic-gate /*
1694*0Sstevel@tonic-gate * Some IPC messages may be left in the recv_buf;
1695*0Sstevel@tonic-gate * they will be dropped.
1696*0Sstevel@tonic-gate */
1697*0Sstevel@tonic-gate if (path->msgbuf_cnt != 0)
1698*0Sstevel@tonic-gate cmn_err(CE_NOTE, "path=%lx msgbuf_cnt != 0\n",
1699*0Sstevel@tonic-gate (uintptr_t)path);
1700*0Sstevel@tonic-gate #endif
1701*0Sstevel@tonic-gate while (path->sendq_token.ref_cnt != 0)
1702*0Sstevel@tonic-gate cv_wait(&path->sendq_token.sendq_cv,
1703*0Sstevel@tonic-gate &path->mutex);
1704*0Sstevel@tonic-gate
1705*0Sstevel@tonic-gate /* release the rsmpi handle */
1706*0Sstevel@tonic-gate if (path->sendq_token.rsmpi_sendq_handle != NULL)
1707*0Sstevel@tonic-gate path->local_adapter->rsmpi_ops->rsm_sendq_destroy(
1708*0Sstevel@tonic-gate path->sendq_token.rsmpi_sendq_handle);
1709*0Sstevel@tonic-gate
1710*0Sstevel@tonic-gate path->sendq_token.rsmpi_sendq_handle = NULL;
1711*0Sstevel@tonic-gate
1712*0Sstevel@tonic-gate path->state = RSMKA_PATH_DOWN;
1713*0Sstevel@tonic-gate
1714*0Sstevel@tonic-gate cv_signal(&path->hold_cv);
1715*0Sstevel@tonic-gate
1716*0Sstevel@tonic-gate }
1717*0Sstevel@tonic-gate
1718*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_down done\n"));
1719*0Sstevel@tonic-gate
1720*0Sstevel@tonic-gate }
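
/*
 * Condensed view of the blocking teardown sequence in do_path_down
 * (a summary sketch, not additional logic): mark the path GOING_DOWN,
 * wake blocked senders, drain receive tasks and importer segments
 * with the lock dropped, then finish under the lock.
 *
 *	path->state = RSMKA_PATH_GOING_DOWN;
 *	cv_broadcast(&path->sendq_token.sendq_cv);
 *	mutex_exit(&path->mutex);
 *	taskq_wait(path->recv_taskq);
 *	path_importer_disconnect(path);
 *	mutex_enter(&path->mutex);
 *	// ... wait for sendq refs to drain, destroy the sendq ...
 *	path->state = RSMKA_PATH_DOWN;
 *	cv_signal(&path->hold_cv);
 */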
1721*0Sstevel@tonic-gate
1722*0Sstevel@tonic-gate /*
1723*0Sstevel@tonic-gate * Search through the list of imported segments for segments using this path
1724*0Sstevel@tonic-gate * and unload the memory mappings for each one. The application will
1725*0Sstevel@tonic-gate * get an error return when a barrier close is invoked.
1726*0Sstevel@tonic-gate * NOTE: This function has to be called only after releasing the mutex on
1727*0Sstevel@tonic-gate * the path. This is to avoid any recursive mutex panics on the path mutex
1728*0Sstevel@tonic-gate * since the path_importer_disconnect function would end up calling
1729*0Sstevel@tonic-gate * rsmka_get_sendq_token which requires the path mutex.
1730*0Sstevel@tonic-gate */
1731*0Sstevel@tonic-gate
1732*0Sstevel@tonic-gate static void
1733*0Sstevel@tonic-gate path_importer_disconnect(path_t *path)
1734*0Sstevel@tonic-gate {
1735*0Sstevel@tonic-gate int i;
1736*0Sstevel@tonic-gate adapter_t *adapter = path->local_adapter;
1737*0Sstevel@tonic-gate rsm_node_id_t remote_node = path->remote_node;
1738*0Sstevel@tonic-gate rsmresource_t *p = NULL;
1739*0Sstevel@tonic-gate rsmseg_t *seg;
1740*0Sstevel@tonic-gate
1741*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1742*0Sstevel@tonic-gate "path_importer_disconnect enter\n"));
1743*0Sstevel@tonic-gate
1744*0Sstevel@tonic-gate rw_enter(&rsm_import_segs.rsmhash_rw, RW_READER);
1745*0Sstevel@tonic-gate
1746*0Sstevel@tonic-gate if (rsm_import_segs.bucket != NULL) {
1747*0Sstevel@tonic-gate for (i = 0; i < rsm_hash_size; i++) {
1748*0Sstevel@tonic-gate p = rsm_import_segs.bucket[i];
1749*0Sstevel@tonic-gate for (; p; p = p->rsmrc_next) {
1750*0Sstevel@tonic-gate if ((p->rsmrc_node == remote_node) &&
1751*0Sstevel@tonic-gate (p->rsmrc_adapter == adapter)) {
1752*0Sstevel@tonic-gate seg = (rsmseg_t *)p;
1753*0Sstevel@tonic-gate /*
1754*0Sstevel@tonic-gate * In order to make rsmseg_unload and
1755*0Sstevel@tonic-gate * path_importer_disconnect thread safe, acquire the
1756*0Sstevel@tonic-gate * segment lock here. rsmseg_unload is responsible for
1757*0Sstevel@tonic-gate * releasing the lock. rsmseg_unload releases the lock
1758*0Sstevel@tonic-gate * just before a call to rsmipc_send or in case of an
1759*0Sstevel@tonic-gate * early exit which occurs if the segment was in the
1760*0Sstevel@tonic-gate * state RSM_STATE_CONNECTING or RSM_STATE_NEW.
1761*0Sstevel@tonic-gate */
1762*0Sstevel@tonic-gate rsmseglock_acquire(seg);
1763*0Sstevel@tonic-gate seg->s_flags |= RSM_FORCE_DISCONNECT;
1764*0Sstevel@tonic-gate rsmseg_unload(seg);
1765*0Sstevel@tonic-gate }
1766*0Sstevel@tonic-gate }
1767*0Sstevel@tonic-gate }
1768*0Sstevel@tonic-gate }
1769*0Sstevel@tonic-gate rw_exit(&rsm_import_segs.rsmhash_rw);
1770*0Sstevel@tonic-gate
1771*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1772*0Sstevel@tonic-gate "path_importer_disconnect done\n"));
1773*0Sstevel@tonic-gate }
1774*0Sstevel@tonic-gate
1775*0Sstevel@tonic-gate
1776*0Sstevel@tonic-gate
1777*0Sstevel@tonic-gate
1778*0Sstevel@tonic-gate /*
1779*0Sstevel@tonic-gate *
1780*0Sstevel@tonic-gate * ADAPTER UTILITY FUNCTIONS
1781*0Sstevel@tonic-gate *
1782*0Sstevel@tonic-gate */
1783*0Sstevel@tonic-gate
1784*0Sstevel@tonic-gate
1785*0Sstevel@tonic-gate
1786*0Sstevel@tonic-gate /*
1787*0Sstevel@tonic-gate * Allocate new adapter list head structure and add it to the beginning of
1788*0Sstevel@tonic-gate * the list of adapter list heads. There is one list for each adapter
1789*0Sstevel@tonic-gate * device name (or type).
1790*0Sstevel@tonic-gate */
1791*0Sstevel@tonic-gate static adapter_listhead_t *
1792*0Sstevel@tonic-gate init_listhead(char *name)
1793*0Sstevel@tonic-gate {
1794*0Sstevel@tonic-gate adapter_listhead_t *listhead;
1795*0Sstevel@tonic-gate
1796*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_listhead enter\n"));
1797*0Sstevel@tonic-gate
1798*0Sstevel@tonic-gate /* allocation and initialization */
1799*0Sstevel@tonic-gate listhead = kmem_zalloc(sizeof (adapter_listhead_t), KM_SLEEP);
1800*0Sstevel@tonic-gate mutex_init(&listhead->mutex, NULL, MUTEX_DEFAULT, NULL);
1801*0Sstevel@tonic-gate (void) strcpy(listhead->adapter_devname, name);
1802*0Sstevel@tonic-gate
1803*0Sstevel@tonic-gate /* link into list of listheads */
1804*0Sstevel@tonic-gate mutex_enter(&adapter_listhead_base.listlock);
1805*0Sstevel@tonic-gate if (adapter_listhead_base.next == NULL) {
1806*0Sstevel@tonic-gate adapter_listhead_base.next = listhead;
1807*0Sstevel@tonic-gate listhead->next_listhead = NULL;
1808*0Sstevel@tonic-gate } else {
1809*0Sstevel@tonic-gate listhead->next_listhead = adapter_listhead_base.next;
1810*0Sstevel@tonic-gate adapter_listhead_base.next = listhead;
1811*0Sstevel@tonic-gate }
1812*0Sstevel@tonic-gate mutex_exit(&adapter_listhead_base.listlock);
1813*0Sstevel@tonic-gate
1814*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_listhead done\n"));
1815*0Sstevel@tonic-gate
1816*0Sstevel@tonic-gate return (listhead);
1817*0Sstevel@tonic-gate }
1818*0Sstevel@tonic-gate
1819*0Sstevel@tonic-gate
1820*0Sstevel@tonic-gate /*
1821*0Sstevel@tonic-gate * Search the list of adapter list heads for a match on name.
1822*0Sstevel@tonic-gate *
1823*0Sstevel@tonic-gate */
1824*0Sstevel@tonic-gate static adapter_listhead_t *
1825*0Sstevel@tonic-gate lookup_adapter_listhead(char *name)
1826*0Sstevel@tonic-gate {
1827*0Sstevel@tonic-gate adapter_listhead_t *listhead;
1828*0Sstevel@tonic-gate
1829*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1830*0Sstevel@tonic-gate "lookup_adapter_listhead enter\n"));
1831*0Sstevel@tonic-gate
1832*0Sstevel@tonic-gate mutex_enter(&adapter_listhead_base.listlock);
1833*0Sstevel@tonic-gate listhead = adapter_listhead_base.next;
1834*0Sstevel@tonic-gate while (listhead != NULL) {
1835*0Sstevel@tonic-gate if (strcmp(name, listhead->adapter_devname) == 0)
1836*0Sstevel@tonic-gate break;
1837*0Sstevel@tonic-gate listhead = listhead->next_listhead;
1838*0Sstevel@tonic-gate }
1839*0Sstevel@tonic-gate mutex_exit(&adapter_listhead_base.listlock);
1840*0Sstevel@tonic-gate
1841*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1842*0Sstevel@tonic-gate "lookup_adapter_listhead done\n"));
1843*0Sstevel@tonic-gate
1844*0Sstevel@tonic-gate return (listhead);
1845*0Sstevel@tonic-gate }
1846*0Sstevel@tonic-gate
1847*0Sstevel@tonic-gate
1848*0Sstevel@tonic-gate /*
1849*0Sstevel@tonic-gate * Get the adapter list head corresponding to devname and search for
1850*0Sstevel@tonic-gate * an adapter descriptor with a match on the instance number. If
1851*0Sstevel@tonic-gate * successful, increment the descriptor reference count and return
1852*0Sstevel@tonic-gate * the descriptor pointer to the caller.
1853*0Sstevel@tonic-gate *
1854*0Sstevel@tonic-gate */
1855*0Sstevel@tonic-gate adapter_t *
1856*0Sstevel@tonic-gate rsmka_lookup_adapter(char *devname, int instance)
1857*0Sstevel@tonic-gate {
1858*0Sstevel@tonic-gate adapter_listhead_t *listhead;
1859*0Sstevel@tonic-gate adapter_t *current = NULL;
1860*0Sstevel@tonic-gate
1861*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1862*0Sstevel@tonic-gate "rsmka_lookup_adapter enter\n"));
1863*0Sstevel@tonic-gate
1864*0Sstevel@tonic-gate listhead = lookup_adapter_listhead(devname);
1865*0Sstevel@tonic-gate if (listhead != NULL) {
1866*0Sstevel@tonic-gate mutex_enter(&listhead->mutex);
1867*0Sstevel@tonic-gate
1868*0Sstevel@tonic-gate current = listhead->next_adapter;
1869*0Sstevel@tonic-gate while (current != NULL) {
1870*0Sstevel@tonic-gate if (current->instance == instance) {
1871*0Sstevel@tonic-gate ADAPTER_HOLD(current);
1872*0Sstevel@tonic-gate break;
1873*0Sstevel@tonic-gate } else
1874*0Sstevel@tonic-gate current = current->next;
1875*0Sstevel@tonic-gate }
1876*0Sstevel@tonic-gate
1877*0Sstevel@tonic-gate mutex_exit(&listhead->mutex);
1878*0Sstevel@tonic-gate }
1879*0Sstevel@tonic-gate
1880*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1881*0Sstevel@tonic-gate "rsmka_lookup_adapter done\n"));
1882*0Sstevel@tonic-gate
1883*0Sstevel@tonic-gate return (current);
1884*0Sstevel@tonic-gate }
1885*0Sstevel@tonic-gate
1886*0Sstevel@tonic-gate /*
1887*0Sstevel@tonic-gate * Called from rsmka_remove_adapter or rsmseg_free.
1888*0Sstevel@tonic-gate * rsm_bind() and rsm_connect() store the adapter pointer returned
1889*0Sstevel@tonic-gate * from rsmka_getadapter. The pointer is kept in the segment descriptor.
1890*0Sstevel@tonic-gate * When the segment is freed, this routine is called by rsmseg_free to decrement
1891*0Sstevel@tonic-gate * the adapter descriptor reference count and possibly free the
1892*0Sstevel@tonic-gate * descriptor.
1893*0Sstevel@tonic-gate */
1894*0Sstevel@tonic-gate void
1895*0Sstevel@tonic-gate rsmka_release_adapter(adapter_t *adapter)
1896*0Sstevel@tonic-gate {
1897*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1898*0Sstevel@tonic-gate "rsmka_release_adapter enter\n"));
1899*0Sstevel@tonic-gate
1900*0Sstevel@tonic-gate if (adapter == &loopback_adapter) {
1901*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1902*0Sstevel@tonic-gate "rsmka_release_adapter done\n"));
1903*0Sstevel@tonic-gate return;
1904*0Sstevel@tonic-gate }
1905*0Sstevel@tonic-gate
1906*0Sstevel@tonic-gate mutex_enter(&adapter->mutex);
1907*0Sstevel@tonic-gate
1908*0Sstevel@tonic-gate /* decrement reference count */
1909*0Sstevel@tonic-gate ADAPTER_RELE_NOLOCK(adapter);
1910*0Sstevel@tonic-gate
1911*0Sstevel@tonic-gate /*
1912*0Sstevel@tonic-gate * if the adapter descriptor reference count is equal to the
1913*0Sstevel@tonic-gate * initialization value of one, then the descriptor has been
1914*0Sstevel@tonic-gate * unlinked and can now be freed.
1915*0Sstevel@tonic-gate */
1916*0Sstevel@tonic-gate if (adapter->ref_cnt == 1) {
1917*0Sstevel@tonic-gate mutex_exit(&adapter->mutex);
1918*0Sstevel@tonic-gate
1919*0Sstevel@tonic-gate mutex_destroy(&adapter->mutex);
1920*0Sstevel@tonic-gate kmem_free(adapter->hdlr_argp, sizeof (srv_handler_arg_t));
1921*0Sstevel@tonic-gate kmem_free(adapter, sizeof (adapter_t));
1922*0Sstevel@tonic-gate } else
1924*0Sstevel@tonic-gate mutex_exit(&adapter->mutex);
1925*0Sstevel@tonic-gate
1926*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1927*0Sstevel@tonic-gate "rsmka_release_adapter done\n"));
1928*0Sstevel@tonic-gate
1929*0Sstevel@tonic-gate }
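
/*
 * Lifetime sketch for the adapter refcount protocol described above:
 * rsmka_lookup_adapter takes a hold, rsmka_release_adapter drops it,
 * and the descriptor is freed only once the count falls back to its
 * initialization value of one (i.e. after the adapter has been
 * unlinked).
 *
 *	adapter = rsmka_lookup_adapter(devname, instance);  // ref_cnt++
 *	if (adapter != NULL) {
 *		// ... use the adapter ...
 *		rsmka_release_adapter(adapter);		    // ref_cnt--
 *	}
 */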
1930*0Sstevel@tonic-gate
1931*0Sstevel@tonic-gate
1932*0Sstevel@tonic-gate
1933*0Sstevel@tonic-gate /*
1934*0Sstevel@tonic-gate * Singly linked list. Add to the front.
1935*0Sstevel@tonic-gate */
1936*0Sstevel@tonic-gate static void
1937*0Sstevel@tonic-gate link_adapter(adapter_t *adapter)
1938*0Sstevel@tonic-gate {
1939*0Sstevel@tonic-gate
1940*0Sstevel@tonic-gate adapter_listhead_t *listhead;
1941*0Sstevel@tonic-gate adapter_t *current;
1942*0Sstevel@tonic-gate
1943*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_adapter enter\n"));
1944*0Sstevel@tonic-gate
1945*0Sstevel@tonic-gate mutex_enter(&adapter_listhead_base.listlock);
1946*0Sstevel@tonic-gate
1947*0Sstevel@tonic-gate mutex_enter(&adapter->listhead->mutex);
1948*0Sstevel@tonic-gate
1949*0Sstevel@tonic-gate listhead = adapter->listhead;
1950*0Sstevel@tonic-gate current = listhead->next_adapter;
1951*0Sstevel@tonic-gate listhead->next_adapter = adapter;
1952*0Sstevel@tonic-gate adapter->next = current;
1953*0Sstevel@tonic-gate ADAPTER_HOLD(adapter);
1954*0Sstevel@tonic-gate
1955*0Sstevel@tonic-gate adapter->listhead->adapter_count++;
1956*0Sstevel@tonic-gate
1957*0Sstevel@tonic-gate mutex_exit(&adapter->listhead->mutex);
1958*0Sstevel@tonic-gate
1959*0Sstevel@tonic-gate mutex_exit(&adapter_listhead_base.listlock);
1960*0Sstevel@tonic-gate
1961*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_adapter done\n"));
1962*0Sstevel@tonic-gate }
1963*0Sstevel@tonic-gate
1964*0Sstevel@tonic-gate
1965*0Sstevel@tonic-gate /*
1966*0Sstevel@tonic-gate * Return adapter descriptor
1967*0Sstevel@tonic-gate *
1968*0Sstevel@tonic-gate * lookup_adapter_listhead returns with the list of adapter listheads
1969*0Sstevel@tonic-gate * locked. After adding the adapter descriptor, the adapter listhead list
1970*0Sstevel@tonic-gate * lock is dropped.
1971*0Sstevel@tonic-gate */
1972*0Sstevel@tonic-gate static adapter_t *
1973*0Sstevel@tonic-gate init_adapter(char *name, int instance, rsm_addr_t hwaddr,
1974*0Sstevel@tonic-gate rsm_controller_handle_t handle, rsm_ops_t *ops,
1975*0Sstevel@tonic-gate srv_handler_arg_t *hdlr_argp)
1976*0Sstevel@tonic-gate {
1977*0Sstevel@tonic-gate adapter_t *adapter;
1978*0Sstevel@tonic-gate adapter_listhead_t *listhead;
1979*0Sstevel@tonic-gate
1980*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_adapter enter\n"));
1981*0Sstevel@tonic-gate
1982*0Sstevel@tonic-gate adapter = kmem_zalloc(sizeof (adapter_t), KM_SLEEP);
1983*0Sstevel@tonic-gate adapter->instance = instance;
1984*0Sstevel@tonic-gate adapter->hwaddr = hwaddr;
1985*0Sstevel@tonic-gate adapter->rsmpi_handle = handle;
1986*0Sstevel@tonic-gate adapter->rsmpi_ops = ops;
1987*0Sstevel@tonic-gate adapter->hdlr_argp = hdlr_argp;
1988*0Sstevel@tonic-gate mutex_init(&adapter->mutex, NULL, MUTEX_DEFAULT, NULL);
1989*0Sstevel@tonic-gate ADAPTER_HOLD(adapter);
1990*0Sstevel@tonic-gate
1991*0Sstevel@tonic-gate
1992*0Sstevel@tonic-gate listhead = lookup_adapter_listhead(name);
1993*0Sstevel@tonic-gate if (listhead == NULL) {
1994*0Sstevel@tonic-gate listhead = init_listhead(name);
1995*0Sstevel@tonic-gate }
1996*0Sstevel@tonic-gate
1997*0Sstevel@tonic-gate adapter->listhead = listhead;
1998*0Sstevel@tonic-gate
1999*0Sstevel@tonic-gate link_adapter(adapter);
2000*0Sstevel@tonic-gate
2001*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_adapter done\n"));
2002*0Sstevel@tonic-gate
2003*0Sstevel@tonic-gate return (adapter);
2004*0Sstevel@tonic-gate }
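
/*
 * Flow sketch for adapter registration (a summary under the assumption
 * that rsmka_add_adapter supplies these arguments from the RSMPI
 * attach path):
 *
 *	adapter = init_adapter(name, instance, hwaddr,
 *	    controller_handle, controller_ops, hdlr_argp);
 *	// the adapter is now linked under its devname listhead, holding
 *	// one reference from init_adapter and one from link_adapter
 */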
2005*0Sstevel@tonic-gate
2006*0Sstevel@tonic-gate /*
2007*0Sstevel@tonic-gate *
2008*0Sstevel@tonic-gate * PATH UTILITY FUNCTIONS
2009*0Sstevel@tonic-gate *
2010*0Sstevel@tonic-gate */
2011*0Sstevel@tonic-gate
2012*0Sstevel@tonic-gate
2013*0Sstevel@tonic-gate /*
2014*0Sstevel@tonic-gate * Search the per adapter path list for a match on remote node and
2015*0Sstevel@tonic-gate * hwaddr. The path ref_cnt must be greater than zero or the path
2016*0Sstevel@tonic-gate * is in the process of being removed.
2017*0Sstevel@tonic-gate *
2018*0Sstevel@tonic-gate * Acquire the path lock and increment the path hold count.
2019*0Sstevel@tonic-gate */
2020*0Sstevel@tonic-gate static path_t *
2021*0Sstevel@tonic-gate lookup_path(char *adapter_devname, int adapter_instance,
2022*0Sstevel@tonic-gate rsm_node_id_t remote_node, rsm_addr_t hwaddr)
2023*0Sstevel@tonic-gate {
2024*0Sstevel@tonic-gate path_t *current;
2025*0Sstevel@tonic-gate adapter_t *adapter;
2026*0Sstevel@tonic-gate
2027*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_path enter\n"));
2028*0Sstevel@tonic-gate
2029*0Sstevel@tonic-gate adapter = rsmka_lookup_adapter(adapter_devname, adapter_instance);
2030*0Sstevel@tonic-gate ASSERT(adapter != NULL);
2031*0Sstevel@tonic-gate
2032*0Sstevel@tonic-gate mutex_enter(&adapter->listhead->mutex);
2033*0Sstevel@tonic-gate
2034*0Sstevel@tonic-gate /* start at the list head */
2035*0Sstevel@tonic-gate current = adapter->next_path;
2036*0Sstevel@tonic-gate
2037*0Sstevel@tonic-gate while (current != NULL) {
2038*0Sstevel@tonic-gate if ((current->remote_node == remote_node) &&
2039*0Sstevel@tonic-gate (current->remote_hwaddr == hwaddr) &&
2040*0Sstevel@tonic-gate (current->ref_cnt > 0))
2041*0Sstevel@tonic-gate break;
2042*0Sstevel@tonic-gate else
2043*0Sstevel@tonic-gate current = current->next_path;
2044*0Sstevel@tonic-gate }
2045*0Sstevel@tonic-gate if (current != NULL) {
2046*0Sstevel@tonic-gate mutex_enter(&current->mutex);
2047*0Sstevel@tonic-gate PATH_HOLD_NOLOCK(current);
2048*0Sstevel@tonic-gate }
2049*0Sstevel@tonic-gate
2050*0Sstevel@tonic-gate mutex_exit(&adapter->listhead->mutex);
2051*0Sstevel@tonic-gate ADAPTER_RELE(adapter);
2052*0Sstevel@tonic-gate
2053*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_path done\n"));
2054*0Sstevel@tonic-gate
2055*0Sstevel@tonic-gate return (current);
2056*0Sstevel@tonic-gate }
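
/*
 * Usage sketch for the hold/release discipline lookup_path
 * establishes: the caller receives the path locked and held, and must
 * drop both when done. (The adapter name "wci" is illustrative only.)
 *
 *	path = lookup_path("wci", 0, remote_node, remote_hwaddr);
 *	if (path != NULL) {
 *		// path->mutex is held and ref_cnt has been incremented
 *		// ... examine or update the path ...
 *		PATH_RELE_NOLOCK(path);
 *		mutex_exit(&path->mutex);
 *	}
 */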
2057*0Sstevel@tonic-gate
2058*0Sstevel@tonic-gate /*
2059*0Sstevel@tonic-gate * This interface is similar to lookup_path but takes only the local
2060*0Sstevel@tonic-gate * adapter name, instance and the remote adapter's hwaddr to identify the
2061*0Sstevel@tonic-gate * path. This is used in the interrupt handler routines where the nodeid
2062*0Sstevel@tonic-gate * is not always available.
2063*0Sstevel@tonic-gate */
2064*0Sstevel@tonic-gate path_t *
2065*0Sstevel@tonic-gate rsm_find_path(char *adapter_devname, int adapter_instance, rsm_addr_t hwaddr)
2066*0Sstevel@tonic-gate {
2067*0Sstevel@tonic-gate path_t *current;
2068*0Sstevel@tonic-gate adapter_t *adapter;
2069*0Sstevel@tonic-gate
2070*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_find_path enter\n"));
2071*0Sstevel@tonic-gate
2072*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2073*0Sstevel@tonic-gate "rsm_find_path:adapter=%s:%d,rem=%llx\n",
2074*0Sstevel@tonic-gate adapter_devname, adapter_instance, hwaddr));
2075*0Sstevel@tonic-gate
2076*0Sstevel@tonic-gate adapter = rsmka_lookup_adapter(adapter_devname, adapter_instance);
2077*0Sstevel@tonic-gate
2078*0Sstevel@tonic-gate /*
2079*0Sstevel@tonic-gate * it's possible that we are here due to an interrupt but the adapter
2080*0Sstevel@tonic-gate * has been removed after we received the callback.
2081*0Sstevel@tonic-gate */
2082*0Sstevel@tonic-gate if (adapter == NULL)
2083*0Sstevel@tonic-gate return (NULL);
2084*0Sstevel@tonic-gate
2085*0Sstevel@tonic-gate mutex_enter(&adapter->listhead->mutex);
2086*0Sstevel@tonic-gate
2087*0Sstevel@tonic-gate /* start at the list head */
2088*0Sstevel@tonic-gate current = adapter->next_path;
2089*0Sstevel@tonic-gate
2090*0Sstevel@tonic-gate while (current != NULL) {
2091*0Sstevel@tonic-gate if ((current->remote_hwaddr == hwaddr) &&
2092*0Sstevel@tonic-gate (current->ref_cnt > 0))
2093*0Sstevel@tonic-gate break;
2094*0Sstevel@tonic-gate else
2095*0Sstevel@tonic-gate current = current->next_path;
2096*0Sstevel@tonic-gate }
2097*0Sstevel@tonic-gate if (current != NULL) {
2098*0Sstevel@tonic-gate mutex_enter(&current->mutex);
2099*0Sstevel@tonic-gate PATH_HOLD_NOLOCK(current);
2100*0Sstevel@tonic-gate }
2101*0Sstevel@tonic-gate
2102*0Sstevel@tonic-gate mutex_exit(&adapter->listhead->mutex);
2103*0Sstevel@tonic-gate
2104*0Sstevel@tonic-gate rsmka_release_adapter(adapter);
2105*0Sstevel@tonic-gate
2106*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_find_path done\n"));
2107*0Sstevel@tonic-gate
2108*0Sstevel@tonic-gate return (current);
2109*0Sstevel@tonic-gate }
2110*0Sstevel@tonic-gate
2111*0Sstevel@tonic-gate
2112*0Sstevel@tonic-gate /*
2113*0Sstevel@tonic-gate * Add the path to the head of the (per adapter) list of paths
2114*0Sstevel@tonic-gate */
2115*0Sstevel@tonic-gate static void
2116*0Sstevel@tonic-gate link_path(path_t *path)
2117*0Sstevel@tonic-gate {
2118*0Sstevel@tonic-gate
2119*0Sstevel@tonic-gate adapter_t *adapter = path->local_adapter;
2120*0Sstevel@tonic-gate path_t *first_path;
2121*0Sstevel@tonic-gate
2122*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_path enter\n"));
2123*0Sstevel@tonic-gate
2124*0Sstevel@tonic-gate mutex_enter(&adapter_listhead_base.listlock);
2125*0Sstevel@tonic-gate
2126*0Sstevel@tonic-gate mutex_enter(&adapter->listhead->mutex);
2127*0Sstevel@tonic-gate
2128*0Sstevel@tonic-gate first_path = adapter->next_path;
2129*0Sstevel@tonic-gate adapter->next_path = path;
2130*0Sstevel@tonic-gate path->next_path = first_path;
2131*0Sstevel@tonic-gate
2132*0Sstevel@tonic-gate adapter->listhead->path_count++;
2133*0Sstevel@tonic-gate
2134*0Sstevel@tonic-gate mutex_exit(&adapter->listhead->mutex);
2135*0Sstevel@tonic-gate
2136*0Sstevel@tonic-gate mutex_exit(&adapter_listhead_base.listlock);
2137*0Sstevel@tonic-gate
2138*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_path done\n"));
2139*0Sstevel@tonic-gate }
2140*0Sstevel@tonic-gate
2141*0Sstevel@tonic-gate /*
2142*0Sstevel@tonic-gate * Search the per-adapter list of paths for the specified path, beginning
2143*0Sstevel@tonic-gate * at the head of the list. Unlink the path and free the descriptor
2144*0Sstevel@tonic-gate * memory.
2145*0Sstevel@tonic-gate */
2146*0Sstevel@tonic-gate static void
2147*0Sstevel@tonic-gate destroy_path(path_t *path)
2148*0Sstevel@tonic-gate {
2149*0Sstevel@tonic-gate
2150*0Sstevel@tonic-gate adapter_t *adapter = path->local_adapter;
2151*0Sstevel@tonic-gate path_t *prev, *current;
2152*0Sstevel@tonic-gate
2153*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_path enter\n"));
2154*0Sstevel@tonic-gate
2155*0Sstevel@tonic-gate mutex_enter(&adapter_listhead_base.listlock);
2156*0Sstevel@tonic-gate
2157*0Sstevel@tonic-gate mutex_enter(&path->local_adapter->listhead->mutex);
2158*0Sstevel@tonic-gate ASSERT(path->ref_cnt == 0);
2159*0Sstevel@tonic-gate
2160*0Sstevel@tonic-gate /* start at the list head */
2161*0Sstevel@tonic-gate prev = NULL;
2162*0Sstevel@tonic-gate current = adapter->next_path;
2163*0Sstevel@tonic-gate
2164*0Sstevel@tonic-gate while (current != NULL) {
2165*0Sstevel@tonic-gate if (path->remote_node == current->remote_node &&
2166*0Sstevel@tonic-gate path->remote_hwaddr == current->remote_hwaddr)
2167*0Sstevel@tonic-gate break;
2168*0Sstevel@tonic-gate else {
2169*0Sstevel@tonic-gate prev = current;
2170*0Sstevel@tonic-gate current = current->next_path;
2171*0Sstevel@tonic-gate }
2172*0Sstevel@tonic-gate }
2173*0Sstevel@tonic-gate
2174*0Sstevel@tonic-gate if (prev == NULL)
2175*0Sstevel@tonic-gate adapter->next_path = current->next_path;
2176*0Sstevel@tonic-gate else
2177*0Sstevel@tonic-gate prev->next_path = current->next_path;
2178*0Sstevel@tonic-gate
2179*0Sstevel@tonic-gate path->local_adapter->listhead->path_count--;
2180*0Sstevel@tonic-gate
2181*0Sstevel@tonic-gate mutex_exit(&path->local_adapter->listhead->mutex);
2182*0Sstevel@tonic-gate
2183*0Sstevel@tonic-gate mutex_exit(&adapter_listhead_base.listlock);
2184*0Sstevel@tonic-gate
2185*0Sstevel@tonic-gate taskq_destroy(path->recv_taskq);
2186*0Sstevel@tonic-gate
2187*0Sstevel@tonic-gate kmem_free(path->msgbuf_queue,
2188*0Sstevel@tonic-gate RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t));
2189*0Sstevel@tonic-gate
2190*0Sstevel@tonic-gate mutex_destroy(&current->mutex);
2191*0Sstevel@tonic-gate cv_destroy(&current->sendq_token.sendq_cv);
2192*0Sstevel@tonic-gate cv_destroy(&path->hold_cv);
2193*0Sstevel@tonic-gate kmem_free(current, sizeof (path_t));
2194*0Sstevel@tonic-gate
2195*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_path done\n"));
2196*0Sstevel@tonic-gate }
2197*0Sstevel@tonic-gate
2198*0Sstevel@tonic-gate void
2199*0Sstevel@tonic-gate rsmka_enqueue_msgbuf(path_t *path, void *data)
2200*0Sstevel@tonic-gate {
2201*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2202*0Sstevel@tonic-gate "rsmka_enqueue_msgbuf enter\n"));
2203*0Sstevel@tonic-gate
2204*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&path->mutex));
2205*0Sstevel@tonic-gate
2206*0Sstevel@tonic-gate ASSERT(path->msgbuf_cnt < RSMIPC_MAX_MESSAGES);
2207*0Sstevel@tonic-gate
2208*0Sstevel@tonic-gate /* increment the count and advance the tail */
2209*0Sstevel@tonic-gate
2210*0Sstevel@tonic-gate path->msgbuf_cnt++;
2211*0Sstevel@tonic-gate
2212*0Sstevel@tonic-gate if (path->msgbuf_tail == RSMIPC_MAX_MESSAGES - 1) {
2213*0Sstevel@tonic-gate path->msgbuf_tail = 0;
2214*0Sstevel@tonic-gate } else {
2215*0Sstevel@tonic-gate path->msgbuf_tail++;
2216*0Sstevel@tonic-gate }
2217*0Sstevel@tonic-gate
2218*0Sstevel@tonic-gate path->msgbuf_queue[path->msgbuf_tail].active = B_TRUE;
2219*0Sstevel@tonic-gate
2220*0Sstevel@tonic-gate bcopy(data, &(path->msgbuf_queue[path->msgbuf_tail].msg),
2221*0Sstevel@tonic-gate sizeof (rsmipc_request_t));
2222*0Sstevel@tonic-gate
2223*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2224*0Sstevel@tonic-gate "rsmka_enqueue_msgbuf done\n"));
2225*0Sstevel@tonic-gate
2226*0Sstevel@tonic-gate }
2227*0Sstevel@tonic-gate
2228*0Sstevel@tonic-gate /*
2229*0Sstevel@tonic-gate * Get the head of the queue using rsmka_gethead_msgbuf and then call
2230*0Sstevel@tonic-gate * rsmka_dequeue_msgbuf to remove it.
2231*0Sstevel@tonic-gate */
2232*0Sstevel@tonic-gate void
2233*0Sstevel@tonic-gate rsmka_dequeue_msgbuf(path_t *path)
2234*0Sstevel@tonic-gate {
2235*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2236*0Sstevel@tonic-gate "rsmka_dequeue_msgbuf enter\n"));
2237*0Sstevel@tonic-gate
2238*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&path->mutex));
2239*0Sstevel@tonic-gate
2240*0Sstevel@tonic-gate if (path->msgbuf_cnt == 0)
2241*0Sstevel@tonic-gate return;
2242*0Sstevel@tonic-gate
2243*0Sstevel@tonic-gate path->msgbuf_cnt--;
2244*0Sstevel@tonic-gate
2245*0Sstevel@tonic-gate path->msgbuf_queue[path->msgbuf_head].active = B_FALSE;
2246*0Sstevel@tonic-gate
2247*0Sstevel@tonic-gate if (path->msgbuf_head == RSMIPC_MAX_MESSAGES - 1) {
2248*0Sstevel@tonic-gate path->msgbuf_head = 0;
2249*0Sstevel@tonic-gate } else {
2250*0Sstevel@tonic-gate path->msgbuf_head++;
2251*0Sstevel@tonic-gate }
2252*0Sstevel@tonic-gate
2253*0Sstevel@tonic-gate DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2254*0Sstevel@tonic-gate "rsmka_dequeue_msgbuf done\n"));
2255*0Sstevel@tonic-gate
2256*0Sstevel@tonic-gate }
2257*0Sstevel@tonic-gate
2258*0Sstevel@tonic-gate msgbuf_elem_t *
rsmka_gethead_msgbuf(path_t * path)2259*0Sstevel@tonic-gate rsmka_gethead_msgbuf(path_t *path)
2260*0Sstevel@tonic-gate {
2261*0Sstevel@tonic-gate msgbuf_elem_t *head;
2262*0Sstevel@tonic-gate
2263*0Sstevel@tonic-gate ASSERT(MUTEX_HELD(&path->mutex));
2264*0Sstevel@tonic-gate
2265*0Sstevel@tonic-gate if (path->msgbuf_cnt == 0)
2266*0Sstevel@tonic-gate return (NULL);
2267*0Sstevel@tonic-gate
2268*0Sstevel@tonic-gate head = &path->msgbuf_queue[path->msgbuf_head];
2269*0Sstevel@tonic-gate
2270*0Sstevel@tonic-gate return (head);
2271*0Sstevel@tonic-gate
2272*0Sstevel@tonic-gate }
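
/*
 * Hypothetical consumer sketch of the msgbuf ring above (a drain loop;
 * process_request() is an illustrative placeholder, and the caller is
 * assumed to hold path->mutex as the ASSERTs require):
 *
 *	msgbuf_elem_t *elem;
 *
 *	while ((elem = rsmka_gethead_msgbuf(path)) != NULL) {
 *		process_request(&elem->msg);
 *		rsmka_dequeue_msgbuf(path);
 *	}
 */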
/*
 * Called by rsm_connect which needs the node id corresponding to a
 * remote adapter. A search is done through the paths for the local
 * adapter for a match on the specified remote hardware address.
 */
rsm_node_id_t
get_remote_nodeid(adapter_t *adapter, rsm_addr_t remote_hwaddr)
{

	rsm_node_id_t remote_node;
	path_t *current = adapter->next_path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_nodeid enter\n"));

	mutex_enter(&adapter->listhead->mutex);
	while (current != NULL) {
		if (current->remote_hwaddr == remote_hwaddr) {
			remote_node = current->remote_node;
			break;
		}
		current = current->next_path;
	}

	if (current == NULL)
		remote_node = (rsm_node_id_t)-1;

	mutex_exit(&adapter->listhead->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_nodeid done\n"));

	return (remote_node);
}

/*
 * Called by rsm_connect which needs the hardware address of the
 * remote adapter. A search is done through the paths for the local
 * adapter for a match on the specified remote node.
 */
rsm_addr_t
get_remote_hwaddr(adapter_t *adapter, rsm_node_id_t remote_node)
{

	rsm_addr_t remote_hwaddr;
	path_t *current = adapter->next_path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_hwaddr enter\n"));

	mutex_enter(&adapter->listhead->mutex);
	while (current != NULL) {
		if (current->remote_node == remote_node) {
			remote_hwaddr = current->remote_hwaddr;
			break;
		}
		current = current->next_path;
	}
	if (current == NULL)
		remote_hwaddr = -1;
	mutex_exit(&adapter->listhead->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_hwaddr done\n"));

	return (remote_hwaddr);
}
/*
 * IPC UTILITY FUNCTIONS
 */


/*
 * If an entry exists, return with the ipc_info_lock held
 */
static ipc_info_t *
lookup_ipc_info(rsm_node_id_t remote_node)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_ipc_info enter\n"));

	mutex_enter(&ipc_info_lock);

	ipc_info = ipc_info_head;
	if (ipc_info == NULL) {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "lookup_ipc_info done: ipc_info is NULL\n"));
		return (NULL);
	}

	while (ipc_info->remote_node != remote_node) {
		ipc_info = ipc_info->next;
		if (ipc_info == NULL) {
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "lookup_ipc_info: ipc_info not found\n"));
			mutex_exit(&ipc_info_lock);
			break;
		}
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_ipc_info done\n"));

	return (ipc_info);
}

/*
 * Create an ipc_info descriptor and return with ipc_info_lock held
 */
static ipc_info_t *
init_ipc_info(rsm_node_id_t remote_node, boolean_t state)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_ipc_info enter\n"));

	/*
	 * allocate an ipc_info descriptor and add it to a
	 * singly linked list
	 */

	ipc_info = kmem_zalloc(sizeof (ipc_info_t), KM_SLEEP);
	ipc_info->remote_node = remote_node;
	ipc_info->node_is_alive = state;

	mutex_enter(&ipc_info_lock);
	if (ipc_info_head == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "init_ipc_info:ipc_info_head = %lx\n", ipc_info));
		ipc_info_head = ipc_info;
		ipc_info->next = NULL;
	} else {
		ipc_info->next = ipc_info_head;
		ipc_info_head = ipc_info;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_ipc_info done\n"));

	return (ipc_info);
}

static void
destroy_ipc_info(ipc_info_t *ipc_info)
{
	ipc_info_t *current = ipc_info_head;
	ipc_info_t *prev;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_ipc_info enter\n"));

	ASSERT(MUTEX_HELD(&ipc_info_lock));

	while (current != ipc_info) {
		prev = current;
		current = current->next;
	}
	ASSERT(current != NULL);

	if (current != ipc_info_head)
		prev->next = current->next;
	else
		ipc_info_head = current->next;

	kmem_free(current, sizeof (ipc_info_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_ipc_info done\n"));

}

/*
 * Sendq tokens are kept on a circular list. If tokens A, B, C, & D are
 * on the list headed by ipc_info, then ipc_info points to A, A points to
 * D, D to C, C to B, and B to A.
 */
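/*
 * Pictorially, for the example above (a sketch only):
 *
 *	ipc_info->token_list --> A --> D --> C --> B
 *	                         ^                 |
 *	                         +-----------------+
 */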
static void
link_sendq_token(sendq_token_t *token, rsm_node_id_t remote_node)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_sendq_token enter\n"));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		ipc_info = init_ipc_info(remote_node, B_FALSE);
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: new ipc_info = %lx\n", ipc_info));
	} else {
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: ipc_info = %lx\n", ipc_info));
	}

	if (ipc_info->token_list == NULL) {
		ipc_info->token_list = token;
		ipc_info->current_token = token;
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: current = %lx\n", token));
		token->next = token;
	} else {
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: token = %lx\n", token));
		token->next = ipc_info->token_list->next;
		ipc_info->token_list->next = token;
		ipc_info->token_list = token;
	}

	mutex_exit(&ipc_info_lock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_sendq_token done\n"));

}

static void
unlink_sendq_token(sendq_token_t *token, rsm_node_id_t remote_node)
{
	sendq_token_t *prev, *start, *current;
	ipc_info_t *ipc_info;
	path_t *path = SQ_TOKEN_TO_PATH(token);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "unlink_sendq_token enter\n"));

	ASSERT(path->ref_cnt == 0);

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "ipc_info for %d not found\n", remote_node));
		return;
	}

	prev = ipc_info->token_list;
	start = current = ipc_info->token_list->next;

	for (;;) {
		if (current == token) {
			if (current->next != current) {
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "found token, removed it\n"));
				prev->next = token->next;
				if (ipc_info->token_list == token)
					ipc_info->token_list = prev;
				ipc_info->current_token = token->next;
			} else {
				/* list will be empty */
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "removed token, list empty\n"));
				ipc_info->token_list = NULL;
				ipc_info->current_token = NULL;
			}
			break;
		}
		prev = current;
		current = current->next;
		if (current == start) {
			DBG_PRINTF((category, RSM_DEBUG,
			    "unlink_sendq_token: token not found\n"));
			break;
		}
	}
	mutex_exit(&ipc_info_lock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "unlink_sendq_token done\n"));
}


void
rele_sendq_token(sendq_token_t *token)
{
	path_t *path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rele_sendq_token enter\n"));

	path = SQ_TOKEN_TO_PATH(token);
	mutex_enter(&path->mutex);
	PATH_RELE_NOLOCK(path);
	SENDQ_TOKEN_RELE(path);
	mutex_exit(&path->mutex);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rele_sendq_token done\n"));

}

/*
 * A valid ipc token can only be returned if the remote node is alive.
 * Tokens are on a circular list. Starting with the current token,
 * search for a token with an endpoint in state RSMKA_PATH_ACTIVE.
 * rsmipc_send, which calls rsmka_get_sendq_token, expects that if there
 * are multiple paths available between a node-pair then consecutive calls
 * from a particular invocation of rsmipc_send will return a sendq that is
 * different from the one that was used in the previous iteration. When
 * prev_used is NULL it indicates that this is the first iteration in a
 * specific rsmipc_send invocation.
 *
 * Updating the current token provides round robin selection and this
 * is done only in the first iteration, i.e. when prev_used is NULL.
 */
sendq_token_t *
rsmka_get_sendq_token(rsm_node_id_t remote_node, sendq_token_t *prev_used)
{
	sendq_token_t *token, *first_token;
	path_t *path;
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_get_sendq_token enter\n"));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done: ipc_info is NULL\n"));
		return (NULL);
	}

	if (ipc_info->node_is_alive == B_TRUE) {
		token = first_token = ipc_info->current_token;
		if (token == NULL) {
			mutex_exit(&ipc_info_lock);
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "rsmka_get_sendq_token done: token=NULL\n"));
			return (NULL);
		}

		for (;;) {
			path = SQ_TOKEN_TO_PATH(token);
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "path %lx\n", path));
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_ACTIVE ||
			    path->ref_cnt == 0) {
				mutex_exit(&path->mutex);
			} else {
				if (token != prev_used) {
					/* found a new token */
					break;
				}
				mutex_exit(&path->mutex);
			}

			token = token->next;
			if (token == first_token) {
				/*
				 * we didn't find a new token, reuse
				 * prev_used if the corresponding path
				 * is still up
				 */
				if (prev_used) {
					path = SQ_TOKEN_TO_PATH(prev_used);
					DBG_PRINTF((category,
					    RSM_DEBUG_VERBOSE,
					    "path %lx\n", path));
					mutex_enter(&path->mutex);
					if (path->state != RSMKA_PATH_ACTIVE ||
					    path->ref_cnt == 0) {
						mutex_exit(&path->mutex);
					} else {
						token = prev_used;
						break;
					}
				}
				mutex_exit(&ipc_info_lock);
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "rsmka_get_sendq_token: token=NULL\n"));
				return (NULL);
			}
		}

		PATH_HOLD_NOLOCK(path);
		SENDQ_TOKEN_HOLD(path);
		if (prev_used == NULL) {
			/* change current_token only the first time */
			ipc_info->current_token = token->next;
		}

		mutex_exit(&path->mutex);
		mutex_exit(&ipc_info_lock);

		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done\n"));
		return (token);
	} else {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done\n"));
		return (NULL);
	}
}
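
/*
 * Hypothetical sketch of the caller-side retry pattern described in
 * the comment above (attempt_send() is an illustrative placeholder,
 * not the real rsmipc_send internals):
 *
 *	sendq_token_t *used = NULL;
 *	sendq_token_t *tok;
 *
 *	while ((tok = rsmka_get_sendq_token(remote_node, used)) != NULL) {
 *		int e = attempt_send(tok);
 *		rele_sendq_token(tok);
 *		if (e == RSM_SUCCESS)
 *			break;
 *		used = tok;	(ask for a different path next time)
 *	}
 */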


/*
 * Create the rsmpi send queue for this path. On success, wake up any
 * rsmipc_send() thread waiting for a send queue to become available.
 */
static int
create_ipc_sendq(path_t *path)
{
	int rval;
	sendq_token_t *token;
	adapter_t *adapter;
	int64_t srvc_offset;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "create_ipc_sendq enter\n"));

	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: path = %lx\n",
	    path));

	adapter = path->local_adapter;
	token = &path->sendq_token;

	srvc_offset = path->remote_hwaddr;

	DBG_PRINTF((category, RSM_DEBUG,
	    "create_ipc_sendq: srvc_offset = %lld\n",
	    srvc_offset));

	rval = adapter->rsmpi_ops->rsm_sendq_create(adapter->rsmpi_handle,
	    path->remote_hwaddr,
	    (rsm_intr_service_t)(RSM_SERVICE+srvc_offset),
	    (rsm_intr_pri_t)RSM_PRI, (size_t)RSM_QUEUE_SZ,
	    RSM_INTR_SEND_Q_NO_FENCE,
	    RSM_RESOURCE_SLEEP, NULL, &token->rsmpi_sendq_handle);
	if (rval == RSM_SUCCESS) {
		/* rsmipc_send() may be waiting for a sendq_token */
		mutex_enter(&ipc_info_cvlock);
		cv_broadcast(&ipc_info_cv);
		mutex_exit(&ipc_info_cvlock);
	}

	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: handle = %lx\n",
	    token->rsmpi_sendq_handle));
	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: rval = %d\n",
	    rval));
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "create_ipc_sendq done\n"));

	return (rval);
}


boolean_t
rsmka_check_node_alive(rsm_node_id_t remote_node)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_check_node_alive enter\n"));

	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_check_node_alive done: ipc_info NULL\n"));
		return (B_FALSE);
	}

	if (ipc_info->node_is_alive == B_TRUE) {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_check_node_alive done: node is alive\n"));
		return (B_TRUE);
	} else {
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG,
		    "rsmka_check_node_alive done: node is not alive\n"));
		return (B_FALSE);
	}
}



/*
 * TOPOLOGY IOCTL SUPPORT
 */

static uint32_t
get_topology_size(int mode)
{
	uint32_t topology_size;
	int pointer_area_size;
	adapter_listhead_t *listhead;
	int total_num_of_adapters;
	int total_num_of_paths;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology_size enter\n"));

	/*
	 * Find the total number of adapters and paths by adding up the
	 * individual adapter and path counts from all the listheads
	 */
	total_num_of_adapters = 0;
	total_num_of_paths = 0;
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		total_num_of_adapters += listhead->adapter_count;
		total_num_of_paths += listhead->path_count;
		listhead = listhead->next_listhead;
	}

#ifdef _MULTI_DATAMODEL
	if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32)
		/*
		 * Add an extra 4 bytes to make sure the connections
		 * header is double-word aligned
		 */
		pointer_area_size =
		    (total_num_of_adapters + total_num_of_adapters%2) *
		    sizeof (caddr32_t);
	else
		pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
#else	/* _MULTI_DATAMODEL */
	mode = mode;
	pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
#endif	/* _MULTI_DATAMODEL */


	topology_size = sizeof (rsmka_topology_hdr_t) +
	    pointer_area_size +
	    (total_num_of_adapters * sizeof (rsmka_connections_hdr_t)) +
	    (total_num_of_paths * sizeof (rsmka_remote_cntlr_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology_size done\n"));

	return (topology_size);
}
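
/*
 * For reference, the size computed above corresponds to a buffer laid
 * out as follows (a sketch of the layout that get_topology() fills in):
 *
 *	rsmka_topology_hdr_t
 *	connection pointer area: one caddr_t (or caddr32_t, padded to
 *	    a double-word boundary for ILP32 callers) per adapter
 *	for each adapter:
 *		rsmka_connections_hdr_t
 *		rsmka_remote_cntlr_t, one per path on that adapter
 */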


static void
get_topology(caddr_t arg, char *bufp, int mode)
{

	rsmka_topology_t *tp = (rsmka_topology_t *)bufp;
	adapter_listhead_t *listhead;
	adapter_t *adapter;
	path_t *path;
	int cntlr = 0;
	rsmka_connections_t *connection;
	rsmka_remote_cntlr_t *rem_cntlr;
	int total_num_of_adapters;

#ifdef _MULTI_DATAMODEL
	rsmka_topology32_t *tp32 = (rsmka_topology32_t *)bufp;
#else
	mode = mode;
#endif	/* _MULTI_DATAMODEL */

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology enter\n"));

	/*
	 * Find the total number of adapters by adding up the
	 * individual adapter counts from all the listheads
	 */
	total_num_of_adapters = 0;
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		total_num_of_adapters += listhead->adapter_count;
		listhead = listhead->next_listhead;
	}

	/* fill topology header and adjust bufp */
	tp->topology_hdr.local_nodeid = my_nodeid;
	tp->topology_hdr.local_cntlr_count = total_num_of_adapters;
	bufp = (char *)&tp->connections[0];

	/* leave room for connection pointer area */
#ifdef _MULTI_DATAMODEL
	if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32)
		/* make sure bufp is double-word aligned */
		bufp += (total_num_of_adapters + total_num_of_adapters%2) *
		    sizeof (caddr32_t);
	else
		bufp += total_num_of_adapters * sizeof (caddr_t);
#else	/* _MULTI_DATAMODEL */
	bufp += total_num_of_adapters * sizeof (caddr_t);
#endif	/* _MULTI_DATAMODEL */

	/* fill topology from the adapter and path data */
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		adapter = listhead->next_adapter;
		while (adapter != NULL) {
			/* fill in user based connection pointer */
#ifdef _MULTI_DATAMODEL
			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
				ulong_t delta = (ulong_t)bufp - (ulong_t)tp32;
				caddr32_t userbase = (caddr32_t)((ulong_t)arg &
				    0xffffffff);
				tp32->connections[cntlr++] = userbase + delta;
			} else {
				tp->connections[cntlr++] = arg +
				    (ulong_t)bufp -
				    (ulong_t)tp;
			}
#else	/* _MULTI_DATAMODEL */
			tp->connections[cntlr++] = arg +
			    (ulong_t)bufp -
			    (ulong_t)tp;
#endif	/* _MULTI_DATAMODEL */
			connection = (rsmka_connections_t *)bufp;
			(void) snprintf(connection->hdr.cntlr_name,
			    MAXNAMELEN, "%s%d",
			    listhead->adapter_devname,
			    adapter->instance);
			connection->hdr.local_hwaddr = adapter->hwaddr;
			connection->hdr.remote_cntlr_count = 0;
			bufp += sizeof (rsmka_connections_hdr_t);
			rem_cntlr = (rsmka_remote_cntlr_t *)bufp;
			path = adapter->next_path;
			while (path != NULL) {
				connection->hdr.remote_cntlr_count++;
				rem_cntlr->remote_nodeid = path->remote_node;
				(void) snprintf(rem_cntlr->remote_cntlrname,
				    MAXNAMELEN, "%s%d",
				    listhead->adapter_devname,
				    path->remote_devinst);
				rem_cntlr->remote_hwaddr = path->remote_hwaddr;
				rem_cntlr->connection_state = path->state;
				++rem_cntlr;
				path = path->next_path;
			}
			adapter = adapter->next;
			bufp = (char *)rem_cntlr;
		}
		listhead = listhead->next_listhead;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology done\n"));

}


/*
 * Called from rsm_ioctl() in rsm.c
 * Make sure there is no possibility of blocking while holding
 * adapter_listhead_base.lock
 */
int
rsmka_topology_ioctl(caddr_t arg, int cmd, int mode)
{
	uint32_t topology_size;
	uint32_t request_size;
	char *bufp;
	int error = RSM_SUCCESS;
	size_t max_toposize;

	DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
	    "rsmka_topology_ioctl enter\n"));

	switch (cmd) {
	case RSM_IOCTL_TOPOLOGY_SIZE:
		mutex_enter(&adapter_listhead_base.listlock);
		topology_size = get_topology_size(mode);
		mutex_exit(&adapter_listhead_base.listlock);
		if (ddi_copyout((caddr_t)&topology_size,
		    (caddr_t)arg, sizeof (uint32_t), mode))
			error = RSMERR_BAD_ADDR;
		break;
	case RSM_IOCTL_TOPOLOGY_DATA:
		/*
		 * The size of the buffer which the caller has allocated
		 * is passed in. If the size needed for the topology data
		 * is not sufficient, E2BIG is returned
		 */
		if (ddi_copyin(arg, &request_size, sizeof (uint32_t), mode)) {
			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
			    "rsmka_topology_ioctl done: BAD_ADDR\n"));
			return (RSMERR_BAD_ADDR);
		}
		/* calculate the max size of the topology structure */
		max_toposize = sizeof (rsmka_topology_hdr_t) +
		    RSM_MAX_CTRL * (sizeof (caddr_t) +
		    sizeof (rsmka_connections_hdr_t)) +
		    RSM_MAX_NODE * sizeof (rsmka_remote_cntlr_t);

		if (request_size > max_toposize) { /* validate request_size */
			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
			    "rsmka_topology_ioctl done: size too large\n"));
			return (EINVAL);
		}
		bufp = kmem_zalloc(request_size, KM_SLEEP);
		mutex_enter(&adapter_listhead_base.listlock);
		topology_size = get_topology_size(mode);
		if (request_size < topology_size) {
			kmem_free(bufp, request_size);
			mutex_exit(&adapter_listhead_base.listlock);
			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
			    "rsmka_topology_ioctl done: E2BIG\n"));
			return (E2BIG);
		}

		/* get the topology data and copyout to the caller */
		get_topology(arg, bufp, mode);
		mutex_exit(&adapter_listhead_base.listlock);
		if (ddi_copyout((caddr_t)bufp, (caddr_t)arg,
		    topology_size, mode))
			error = RSMERR_BAD_ADDR;

		kmem_free(bufp, request_size);
		break;
	default:
		DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG,
		    "rsmka_topology_ioctl: cmd not supported\n"));
		error = DDI_FAILURE;
	}

	DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
	    "rsmka_topology_ioctl done: %d\n", error));
	return (error);
}
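
/*
 * Hypothetical user-level sketch of the two-step protocol above
 * (fd is assumed open on the rsm device; error handling and the
 * placement of request_size in the first word of the buffer are
 * illustrative assumptions based on the ddi_copyin above):
 *
 *	uint32_t sz;
 *	char *buf;
 *
 *	(void) ioctl(fd, RSM_IOCTL_TOPOLOGY_SIZE, &sz);
 *	buf = malloc(sz);
 *	*(uint32_t *)buf = sz;
 *	(void) ioctl(fd, RSM_IOCTL_TOPOLOGY_DATA, buf);
 */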