1 /*	$NetBSD: if_se.c,v 1.119 2023/12/20 18:09:19 skrll Exp $	*/
2 
3 /*
4  * Copyright (c) 1997 Ian W. Dall <ian.dall@dsto.defence.gov.au>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Ian W. Dall.
18  * 4. The name of the author may not be used to endorse or promote products
19  *    derived from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Driver for Cabletron EA41x scsi ethernet adaptor.
35  *
36  * Written by Ian Dall <ian.dall@dsto.defence.gov.au> Feb 3, 1997
37  *
38  * Acknowledgement: Thanks are due to Philip L. Budne <budd@cs.bu.edu>
39  * who reverse engineered the EA41x. In developing this code,
40  * Phil's userland daemon "etherd" was referred to extensively in lieu
41  * of accurate documentation for the device.
42  *
43  * This is a weird device! It doesn't conform to the scsi spec much
44  * at all. About the only standard command supported is inquiry. Most
45  * commands are 6 bytes long, but the recv data is only 1 byte.  Data
46  * must be received by periodically polling the device with the recv
47  * command.
48  *
49  * This driver is also a bit unusual. It must look like a network
50  * interface and it must also appear to be a scsi device to the scsi
51  * system. Hence there are cases where there are two entry points, e.g.
52  * sedone is to be called from the scsi subsystem and se_ifstart from
53  * the network interface subsystem.  In addition, to facilitate scsi
54  * commands issued by userland programs, there are open, close and
55  * ioctl entry points. This allows a user program to, for example,
56  * display the ea41x stats and download new code into the adaptor ---
57  * functions which can't be performed through the ifconfig interface.
58  * Normal operation does not require any special userland program.
59  */
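/*
 * Illustration only, not part of the driver: the userland path described
 * above goes through the character device (se_cdevsw, below) and the
 * generic scsi(4) SCIOCCOMMAND ioctl.  A hypothetical sketch; the device
 * node name and the command contents are assumptions, not taken from this
 * file:
 *
 *	int fd = open("/dev/se0", O_RDWR);	// node name per MAKEDEV
 *	scsireq_t req;				// from <sys/scsiio.h>
 *	memset(&req, 0, sizeof(req));
 *	// fill in req.cmd/req.cmdlen with a CTRON_ETHER_* command, plus
 *	// req.databuf/req.datalen/req.flags as needed, then:
 *	ioctl(fd, SCIOCCOMMAND, &req);
 *
 * Normal operation never needs this; it is only for diagnostics and
 * firmware download as noted above.
 */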
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: if_se.c,v 1.119 2023/12/20 18:09:19 skrll Exp $");
63 
64 #ifdef _KERNEL_OPT
65 #include "opt_inet.h"
66 #include "opt_atalk.h"
67 #endif
68 
69 #include <sys/param.h>
70 #include <sys/types.h>
71 
72 #include <sys/buf.h>
73 #include <sys/callout.h>
74 #include <sys/conf.h>
75 #include <sys/device.h>
76 #include <sys/disk.h>
77 #include <sys/disklabel.h>
78 #include <sys/errno.h>
79 #include <sys/file.h>
80 #include <sys/ioctl.h>
81 #include <sys/kernel.h>
82 #include <sys/malloc.h>
83 #include <sys/mbuf.h>
84 #include <sys/mutex.h>
85 #include <sys/proc.h>
86 #include <sys/socket.h>
87 #include <sys/stat.h>
88 #include <sys/syslog.h>
89 #include <sys/systm.h>
90 #include <sys/uio.h>
91 #include <sys/workqueue.h>
92 
93 #include <dev/scsipi/scsi_ctron_ether.h>
94 #include <dev/scsipi/scsiconf.h>
95 #include <dev/scsipi/scsipi_all.h>
96 
97 #include <net/bpf.h>
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_ether.h>
101 #include <net/if_media.h>
102 
103 #ifdef INET
104 #include <netinet/if_inarp.h>
105 #include <netinet/in.h>
106 #endif
107 
108 #ifdef NETATALK
109 #include <netatalk/at.h>
110 #endif
111 
112 #define SETIMEOUT	1000
113 #define	SEOUTSTANDING	4
114 #define	SERETRIES	4
115 #define SE_PREFIX	4
116 #define ETHER_CRC	4
117 #define SEMINSIZE	60
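/*
 * SEMINSIZE is the minimum ethernet frame length (64 bytes) less the
 * 4-byte CRC; se_send_worker() zero-pads shorter frames up to this size.
 */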
118 
119 /* Make this big enough for an ETHERMTU packet in promiscuous mode. */
120 #define MAX_SNAP	(ETHERMTU + sizeof(struct ether_header) + \
121 			 SE_PREFIX + ETHER_CRC)
122 
123 /* 10 full-length packets appear to be the most ever returned. 16k is OK. */
124 #define RBUF_LEN	(16 * 1024)
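/*
 * Rough sizing sketch based on the constants in this file (not a figure
 * from the hardware documentation): each returned packet is preceded by a
 * 2-byte length (see se_read()) and is at most MAX_SNAP = 1500 + 14 + 4 +
 * 4 = 1522 bytes, so 10 packets need at most 10 * (2 + 1522) = 15240
 * bytes, which fits in the 16384-byte buffer.
 */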
125 
126 /* Tuning parameters:
127  * The EA41x returns at most 10 packets per read (regardless of size).
128  * We attempt to adapt the polling interval so that each read returns
129  * about RDATA_GOAL packets.
130  */
131 #define RDATA_MAX 10
132 #define RDATA_GOAL 8
133 
134 /* se_poll and se_poll0 are the normal (maximum) polling interval and the
135  * minimum polling interval, respectively. se_poll0 should be chosen so that
136  * at maximum ethernet speed we read nearly RDATA_MAX packets per poll.
137  * se_poll should be chosen for a reasonable worst-case latency.
138  * In practice, if we are being saturated with minimum-length packets, we
139  * can't poll fast enough. Polling with zero delay actually
140  * worsens performance. se_poll0 is always enforced to be at least 1 tick.
141  */
142 #define SE_POLL 40		/* default in milliseconds */
143 #define SE_POLL0 10		/* default in milliseconds */
144 int se_poll = 0;		/* Delay in ticks set at attach time */
145 int se_poll0 = 0;
146 #ifdef SE_DEBUG
147 int se_max_received = 0;	/* Instrumentation */
148 #endif
149 
150 #define	PROTOCMD(p, d) \
151 	((d) = (p))
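/*
 * PROTOCMD just structure-assigns one of the const command templates below
 * into a local command buffer, which the caller then patches up.  For
 * example, in se_send_worker():
 *
 *	struct scsi_ctron_ether_generic send_cmd;
 *	PROTOCMD(ctron_ether_send, send_cmd);	// send_cmd = ctron_ether_send
 *	_lto2b(len, send_cmd.length);		// then fill in the length
 */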
152 
153 #define	PROTOCMD_DECL(name) \
154 	static const struct scsi_ctron_ether_generic name
155 
156 #define	PROTOCMD_DECL_SPECIAL(name) \
157 	static const struct __CONCAT(scsi_, name) name
158 
159 /* Command initializers for commands using scsi_ctron_ether_generic */
160 PROTOCMD_DECL(ctron_ether_send)	 = {CTRON_ETHER_SEND, 0, {0,0}, 0};
161 PROTOCMD_DECL(ctron_ether_add_proto) = {CTRON_ETHER_ADD_PROTO, 0, {0,0}, 0};
162 PROTOCMD_DECL(ctron_ether_get_addr) = {CTRON_ETHER_GET_ADDR, 0, {0,0}, 0};
163 PROTOCMD_DECL(ctron_ether_set_media) = {CTRON_ETHER_SET_MEDIA, 0, {0,0}, 0};
164 PROTOCMD_DECL(ctron_ether_set_addr) = {CTRON_ETHER_SET_ADDR, 0, {0,0}, 0};
165 PROTOCMD_DECL(ctron_ether_set_multi) = {CTRON_ETHER_SET_MULTI, 0, {0,0}, 0};
166 PROTOCMD_DECL(ctron_ether_remove_multi) =
167     {CTRON_ETHER_REMOVE_MULTI, 0, {0,0}, 0};
168 
169 /* Command initializers for commands using their own structures */
170 PROTOCMD_DECL_SPECIAL(ctron_ether_recv) = {CTRON_ETHER_RECV};
171 PROTOCMD_DECL_SPECIAL(ctron_ether_set_mode) =
172     {CTRON_ETHER_SET_MODE, 0, {0,0}, 0};
173 
174 struct se_softc {
175 	device_t sc_dev;
176 	struct ethercom sc_ethercom;	/* Ethernet common part */
177 	struct scsipi_periph *sc_periph;/* contains our targ, lun, etc. */
178 
179 	struct callout sc_recv_ch;
180 	struct kmutex sc_iflock;
181 	struct if_percpuq *sc_ipq;
182 	struct workqueue *sc_recv_wq, *sc_send_wq;
183 	struct work sc_recv_work, sc_send_work;
184 	int sc_recv_work_pending, sc_send_work_pending;
185 
186 	char *sc_tbuf;
187 	char *sc_rbuf;
188 	int protos;
189 #define PROTO_IP	0x01
190 #define PROTO_ARP	0x02
191 #define PROTO_REVARP	0x04
192 #define PROTO_AT	0x08
193 #define PROTO_AARP	0x10
194 	int sc_debug;
195 	int sc_flags;
196 	int sc_last_timeout;
197 	int sc_enabled;
198 	int sc_attach_state;
199 };
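/*
 * sc_attach_state records how far seattach() got so that sedetach() can
 * unwind only what was actually set up (see the switch in sedetach()):
 *   0 - callout and mutex initialized
 *   1 - tx/rx buffers allocated and MAC address read
 *   2 - receive workqueue created
 *   3 - send workqueue created
 *   4 - fully attached (interface registered)
 */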
200 
201 static int	sematch(device_t, cfdata_t, void *);
202 static void	seattach(device_t, device_t, void *);
203 static int	sedetach(device_t, int);
204 
205 static void	se_ifstart(struct ifnet *);
206 
207 static void	sedone(struct scsipi_xfer *, int);
208 static int	se_ioctl(struct ifnet *, u_long, void *);
209 #if 0
210 static void	sewatchdog(struct ifnet *);
211 #endif
212 
213 #if 0
214 static inline uint16_t ether_cmp(void *, void *);
215 #endif
216 static void	se_recv_callout(void *);
217 static void	se_recv_worker(struct work *wk, void *cookie);
218 static void	se_recv(struct se_softc *);
219 static struct mbuf *se_get(struct se_softc *, char *, int);
220 static int	se_read(struct se_softc *, char *, int);
221 #if 0
222 static void	se_reset(struct se_softc *);
223 #endif
224 static int	se_add_proto(struct se_softc *, int);
225 static int	se_get_addr(struct se_softc *, uint8_t *);
226 static int	se_set_media(struct se_softc *, int);
227 static int	se_init(struct se_softc *);
228 static int	se_set_multi(struct se_softc *, uint8_t *);
229 static int	se_remove_multi(struct se_softc *, uint8_t *);
230 #if 0
231 static int	sc_set_all_multi(struct se_softc *, int);
232 #endif
233 static void	se_stop(struct se_softc *);
234 static inline int se_scsipi_cmd(struct scsipi_periph *periph,
235 			struct scsipi_generic *scsipi_cmd,
236 			int cmdlen, u_char *data_addr, int datalen,
237 			int retries, int timeout, struct buf *bp,
238 			int flags);
239 static void	se_send_worker(struct work *wk, void *cookie);
240 static int	se_set_mode(struct se_softc *, int, int);
241 
242 int	se_enable(struct se_softc *);
243 void	se_disable(struct se_softc *);
244 
245 CFATTACH_DECL_NEW(se, sizeof(struct se_softc),
246     sematch, seattach, sedetach, NULL);
247 
248 extern struct cfdriver se_cd;
249 
250 dev_type_open(seopen);
251 dev_type_close(seclose);
252 dev_type_ioctl(seioctl);
253 
254 const struct cdevsw se_cdevsw = {
255 	.d_open = seopen,
256 	.d_close = seclose,
257 	.d_read = noread,
258 	.d_write = nowrite,
259 	.d_ioctl = seioctl,
260 	.d_stop = nostop,
261 	.d_tty = notty,
262 	.d_poll = nopoll,
263 	.d_mmap = nommap,
264 	.d_kqfilter = nokqfilter,
265 	.d_discard = nodiscard,
266 	.d_flag = D_OTHER | D_MPSAFE
267 };
268 
269 const struct scsipi_periphsw se_switch = {
270 	NULL,			/* Use default error handler */
271 	NULL,			/* have no queue */
272 	NULL,			/* have no async handler */
273 	sedone,			/* deal with send/recv completion */
274 };
275 
276 const struct scsipi_inquiry_pattern se_patterns[] = {
277 	{T_PROCESSOR, T_FIXED,
278 	 "CABLETRN",	     "EA412",		      ""},
279 	{T_PROCESSOR, T_FIXED,
280 	 "Cabletrn",	     "EA412",		      ""},
281 };
282 
283 #if 0
284 /*
285  * Compare two Ether/802 addresses for equality, inlined and
286  * unrolled for speed.
287  * Note: use this like memcmp()
288  */
289 static inline uint16_t
290 ether_cmp(void *one, void *two)
291 {
292 	uint16_t *a = (uint16_t *) one;
293 	uint16_t *b = (uint16_t *) two;
294 	uint16_t diff;
295 
296 	diff = (a[0] - b[0]) | (a[1] - b[1]) | (a[2] - b[2]);
297 
298 	return (diff);
299 }
300 
301 #define ETHER_CMP	ether_cmp
302 #endif
303 
304 static int
305 sematch(device_t parent, cfdata_t match, void *aux)
306 {
307 	struct scsipibus_attach_args *sa = aux;
308 	int priority;
309 
310 	(void)scsipi_inqmatch(&sa->sa_inqbuf,
311 	    se_patterns, sizeof(se_patterns) / sizeof(se_patterns[0]),
312 	    sizeof(se_patterns[0]), &priority);
313 	return (priority);
314 }
315 
316 /*
317  * The routine called by the low level scsi routine when it discovers
318  * a device suitable for this driver.
319  */
320 static void
321 seattach(device_t parent, device_t self, void *aux)
322 {
323 	struct se_softc *sc = device_private(self);
324 	struct scsipibus_attach_args *sa = aux;
325 	struct scsipi_periph *periph = sa->sa_periph;
326 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
327 	uint8_t myaddr[ETHER_ADDR_LEN];
328 	char wqname[MAXCOMLEN];
329 	int rv;
330 
331 	sc->sc_dev = self;
332 
333 	printf("\n");
334 	SC_DEBUG(periph, SCSIPI_DB2, ("seattach: "));
335 
336 	sc->sc_attach_state = 0;
337 	callout_init(&sc->sc_recv_ch, CALLOUT_MPSAFE);
338 	callout_setfunc(&sc->sc_recv_ch, se_recv_callout, (void *)sc);
339 	mutex_init(&sc->sc_iflock, MUTEX_DEFAULT, IPL_SOFTNET);
340 
341 	/*
342 	 * Store information needed to contact our base driver
343 	 */
344 	sc->sc_periph = periph;
345 	periph->periph_dev = sc->sc_dev;
346 	periph->periph_switch = &se_switch;
347 
348 	se_poll = (SE_POLL * hz) / 1000;
349 	se_poll = se_poll? se_poll: 1;
350 	se_poll0 = (SE_POLL0 * hz) / 1000;
351 	se_poll0 = se_poll0? se_poll0: 1;
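	/*
	 * The SE_POLL* defaults are in milliseconds; the callout wants ticks.
	 * As a worked example (assuming hz = 100, which is an assumption and
	 * not something this driver requires): SE_POLL = 40 ms becomes 4
	 * ticks and SE_POLL0 = 10 ms becomes 1 tick.  The "?:" above keeps
	 * either value from rounding down to zero ticks.
	 */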
352 
353 	/*
354 	 * Initialize and attach send and receive buffers
355 	 */
356 	sc->sc_tbuf = malloc(ETHERMTU + sizeof(struct ether_header),
357 			     M_DEVBUF, M_WAITOK);
358 	sc->sc_rbuf = malloc(RBUF_LEN, M_DEVBUF, M_WAITOK);
359 
360 	/* Initialize ifnet structure. */
361 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
362 	ifp->if_softc = sc;
363 	ifp->if_start = se_ifstart;
364 	ifp->if_ioctl = se_ioctl;
365 	ifp->if_watchdog = NULL;
366 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
367 	ifp->if_extflags = IFEF_MPSAFE;
368 	IFQ_SET_READY(&ifp->if_snd);
369 
370 	se_get_addr(sc, myaddr);
371 	sc->sc_attach_state = 1;
372 
373 	/* Attach the interface. */
374 	if_initialize(ifp);
375 
376 	snprintf(wqname, sizeof(wqname), "%sRx", device_xname(sc->sc_dev));
377 	rv = workqueue_create(&sc->sc_recv_wq, wqname, se_recv_worker, sc,
378 	    PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
379 	if (rv != 0) {
380 		aprint_error_dev(sc->sc_dev,
381 		    "unable to create recv Rx workqueue\n");
382 		sedetach(sc->sc_dev, 0);
383 		return; /* Error */
384 	}
385 	sc->sc_recv_work_pending = false;
386 	sc->sc_attach_state = 2;
387 
388 	snprintf(wqname, sizeof(wqname), "%sTx", device_xname(sc->sc_dev));
389 	rv = workqueue_create(&sc->sc_send_wq, wqname, se_send_worker, ifp,
390 	    PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
391 	if (rv != 0) {
392 		aprint_error_dev(sc->sc_dev,
393 		    "unable to create send Tx workqueue\n");
394 		sedetach(sc->sc_dev, 0);
395 		return; /* Error */
396 	}
397 	sc->sc_send_work_pending = false;
398 	sc->sc_attach_state = 3;
399 
400 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
401 	ether_ifattach(ifp, myaddr);
402 	if_register(ifp);
403 	sc->sc_attach_state = 4;
404 }
405 
406 static int
407 sedetach(device_t self, int flags)
408 {
409 	struct se_softc *sc = device_private(self);
410 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
411 
412 	switch(sc->sc_attach_state) {
413 	case 4:
414 		se_stop(sc);
415 		mutex_enter(&sc->sc_iflock);
416 		ifp->if_flags &= ~IFF_RUNNING;
417 		se_disable(sc);
418 		ether_ifdetach(ifp);
419 		if_detach(ifp);
420 		mutex_exit(&sc->sc_iflock);
421 		if_percpuq_destroy(sc->sc_ipq);
422 		/*FALLTHROUGH*/
423 	case 3:
424 		workqueue_destroy(sc->sc_send_wq);
425 		/*FALLTHROUGH*/
426 	case 2:
427 		workqueue_destroy(sc->sc_recv_wq);
428 		/*FALLTHROUGH*/
429 	case 1:
430 		free(sc->sc_rbuf, M_DEVBUF);
431 		free(sc->sc_tbuf, M_DEVBUF);
432 		callout_destroy(&sc->sc_recv_ch);
433 		mutex_destroy(&sc->sc_iflock);
434 		break;
435 	default:
436 		aprint_error_dev(sc->sc_dev, "detach failed (state %d)\n",
437 		    sc->sc_attach_state);
438 		return 1;
439 		break;
440 	}
441 	return 0;
442 }
443 
444 /*
445  * Send a command to the device
446  */
447 static inline int
448 se_scsipi_cmd(struct scsipi_periph *periph, struct scsipi_generic *cmd,
449     int cmdlen, u_char *data_addr, int datalen, int retries, int timeout,
450     struct buf *bp, int flags)
451 {
452 	int error;
453 
454 	error = scsipi_command(periph, cmd, cmdlen, data_addr,
455 	    datalen, retries, timeout, bp, flags);
456 	return (error);
457 }
458 
459 /*
460  * Start routine, called from the network subsystem.
461  */
462 static void
463 se_ifstart(struct ifnet *ifp)
464 {
465 	struct se_softc *sc = ifp->if_softc;
466 
467 	mutex_enter(&sc->sc_iflock);
468 	if (!sc->sc_send_work_pending)  {
469 		sc->sc_send_work_pending = true;
470 		workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work, NULL);
471 	}
472 	/* else: nothing to do - work is already queued */
473 	mutex_exit(&sc->sc_iflock);
474 }
475 
476 /*
477  * Transmit worker: drain the send queue and pass each packet to the device.
478  */
479 static void
480 se_send_worker(struct work *wk, void *cookie)
481 {
482 	struct ifnet *ifp = cookie;
483 	struct se_softc *sc = ifp->if_softc;
484 	struct scsi_ctron_ether_generic send_cmd;
485 	struct mbuf *m, *m0;
486 	int len, error;
487 	u_char *cp;
488 
489 	mutex_enter(&sc->sc_iflock);
490 	sc->sc_send_work_pending = false;
491 	mutex_exit(&sc->sc_iflock);
492 
493 	KASSERT(if_is_mpsafe(ifp));
494 
495 	/* Don't transmit if interface is busy or not running */
496 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
497 		return;
498 
499 	while (1) {
500 		IFQ_DEQUEUE(&ifp->if_snd, m0);
501 		if (m0 == 0)
502 			break;
503 
504 		/* If BPF is listening on this interface, let it see the
505 		 * packet before we commit it to the wire.
506 		 */
507 		bpf_mtap(ifp, m0, BPF_D_OUT);
508 
509 		/* We need to use m->m_pkthdr.len, so require the header */
510 		if ((m0->m_flags & M_PKTHDR) == 0)
511 			panic("se_send_worker: no header mbuf");
512 		len = m0->m_pkthdr.len;
513 
514 		/* Mark the interface busy. */
515 		ifp->if_flags |= IFF_OACTIVE;
516 
517 		/* Chain; copy into linear buffer allocated at attach time. */
518 		cp = sc->sc_tbuf;
519 		for (m = m0; m != NULL; ) {
520 			memcpy(cp, mtod(m, u_char *), m->m_len);
521 			cp += m->m_len;
522 			m = m0 = m_free(m);
523 		}
524 		if (len < SEMINSIZE) {
525 #ifdef SEDEBUG
526 			if (sc->sc_debug)
527 				printf("se: packet size %d (%zu) < %d\n", len,
528 				    cp - (u_char *)sc->sc_tbuf, SEMINSIZE);
529 #endif
530 			memset(cp, 0, SEMINSIZE - len);
531 			len = SEMINSIZE;
532 		}
533 
534 		/* Fill out SCSI command. */
535 		PROTOCMD(ctron_ether_send, send_cmd);
536 		_lto2b(len, send_cmd.length);
537 
538 		/* Send command to device. */
539 		error = se_scsipi_cmd(sc->sc_periph,
540 		    (void *)&send_cmd, sizeof(send_cmd),
541 		    sc->sc_tbuf, len, SERETRIES,
542 		    SETIMEOUT, NULL, XS_CTL_NOSLEEP | XS_CTL_DATA_OUT);
543 		if (error) {
544 			aprint_error_dev(sc->sc_dev,
545 			    "not queued, error %d\n", error);
546 			if_statinc(ifp, if_oerrors);
547 			ifp->if_flags &= ~IFF_OACTIVE;
548 		} else
549 			if_statinc(ifp, if_opackets);
550 	}
551 }
552 
553 
554 /*
555  * Called from the scsibus layer via our scsi device switch.
556  */
557 static void
558 sedone(struct scsipi_xfer *xs, int error)
559 {
560 	struct se_softc *sc = device_private(xs->xs_periph->periph_dev);
561 	struct scsipi_generic *cmd = xs->cmd;
562 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
563 
564 	if (IS_SEND(cmd)) {
565 		ifp->if_flags &= ~IFF_OACTIVE;
566 	} else if (IS_RECV(cmd)) {
567 		/* RECV complete */
568 		/* pass data up. reschedule a recv */
569 		/* scsipi_free_xs will call start. Harmless. */
570 		if (error) {
571 			/* Reschedule after a delay */
572 			callout_schedule(&sc->sc_recv_ch, se_poll);
573 		} else {
574 			int n, ntimeo;
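			/*
			 * Adapt the polling interval so each recv tends to
			 * return about RDATA_GOAL packets: scale the previous
			 * interval by RDATA_GOAL/n and clamp the result to
			 * [se_poll0, se_poll].  With hypothetical numbers,
			 * starting from 20 ticks: n = 4 gives 20*8/4 = 40
			 * ticks (slow down), n = 16 gives 20*8/16 = 10 ticks
			 * (speed up).
			 */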
575 			n = se_read(sc, xs->data, xs->datalen - xs->resid);
576 #ifdef SE_DEBUG
577 			if (n > se_max_received)
578 				se_max_received = n;
579 #endif
580 			if (n == 0)
581 				ntimeo = se_poll;
582 			else if (n >= RDATA_MAX)
583 				ntimeo = se_poll0;
584 			else {
585 				ntimeo = sc->sc_last_timeout;
586 				ntimeo = (ntimeo * RDATA_GOAL)/n;
587 				ntimeo = (ntimeo < se_poll0?
588 					  se_poll0: ntimeo);
589 				ntimeo = (ntimeo > se_poll?
590 					  se_poll: ntimeo);
591 			}
592 			sc->sc_last_timeout = ntimeo;
593 			callout_schedule(&sc->sc_recv_ch, ntimeo);
594 		}
595 	}
596 }
597 
598 /*
599  * Set up a receive command by queuing the work.
600  * Usually called from a callout, but also from se_init().
601  */
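/*
 * Receive pipeline, as wired up in this file: the callout fires and queues
 * sc_recv_work; se_recv_worker() calls se_recv(), which issues a
 * CTRON_ETHER_RECV command into the 16k receive buffer; completion arrives
 * in sedone(), which passes the data to se_read() and reschedules the
 * callout with the adapted interval.
 */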
602 static void
603 se_recv_callout(void *v)
604 {
605 	/* do a recv command */
606 	struct se_softc *sc = (struct se_softc *) v;
607 
608 	if (sc->sc_enabled == 0)
609 		return;
610 
611 	mutex_enter(&sc->sc_iflock);
612 	if (sc->sc_recv_work_pending == true) {
613 		callout_schedule(&sc->sc_recv_ch, se_poll);
614 		mutex_exit(&sc->sc_iflock);
615 		return;
616 	}
617 
618 	sc->sc_recv_work_pending = true;
619 	workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work, NULL);
620 	mutex_exit(&sc->sc_iflock);
621 }
622 
623 /*
624  * Receive worker: run from the workqueue to issue the actual recv.
625  */
626 static void
627 se_recv_worker(struct work *wk, void *cookie)
628 {
629 	struct se_softc *sc = (struct se_softc *) cookie;
630 
631 	mutex_enter(&sc->sc_iflock);
632 	sc->sc_recv_work_pending = false;
633 	mutex_exit(&sc->sc_iflock);
634 	se_recv(sc);
635 
636 }
637 
638 /*
639  * Do the actual work of receiving data.
640  */
641 static void
642 se_recv(struct se_softc *sc)
643 {
644 	struct scsi_ctron_ether_recv recv_cmd;
645 	int error;
646 
647 	/* do a recv command */
648 	PROTOCMD(ctron_ether_recv, recv_cmd);
649 
650 	error = se_scsipi_cmd(sc->sc_periph,
651 	    (void *)&recv_cmd, sizeof(recv_cmd),
652 	    sc->sc_rbuf, RBUF_LEN, SERETRIES, SETIMEOUT, NULL,
653 	    XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
654 	if (error)
655 		callout_schedule(&sc->sc_recv_ch, se_poll);
656 }
657 
658 /*
659  * We copy the data into mbufs.  When full cluster sized units are present
660  * we copy into clusters.
661  */
662 static struct mbuf *
663 se_get(struct se_softc *sc, char *data, int totlen)
664 {
665 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
666 	struct mbuf *m, *m0, *newm;
667 	int len;
668 
669 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
670 	if (m0 == 0)
671 		return (0);
672 	m_set_rcvif(m0, ifp);
673 	m0->m_pkthdr.len = totlen;
674 	len = MHLEN;
675 	m = m0;
676 
677 	while (totlen > 0) {
678 		if (totlen >= MINCLSIZE) {
679 			MCLGET(m, M_DONTWAIT);
680 			if ((m->m_flags & M_EXT) == 0)
681 				goto bad;
682 			len = MCLBYTES;
683 		}
684 
685 		if (m == m0) {
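		/*
		 * For the first mbuf, shift m_data so that the payload behind
		 * the 14-byte ethernet header is ALIGN()ed: ALIGN(x + 14) - 14
		 * puts the header at an offset that leaves the following IP
		 * header word-aligned for the upper layers.  The bytes skipped
		 * are subtracted from the usable length.
		 */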
686 			char *newdata = (char *)
687 			    ALIGN(m->m_data + sizeof(struct ether_header)) -
688 			    sizeof(struct ether_header);
689 			len -= newdata - m->m_data;
690 			m->m_data = newdata;
691 		}
692 
693 		m->m_len = len = uimin(totlen, len);
694 		memcpy(mtod(m, void *), data, len);
695 		data += len;
696 
697 		totlen -= len;
698 		if (totlen > 0) {
699 			MGET(newm, M_DONTWAIT, MT_DATA);
700 			if (newm == 0)
701 				goto bad;
702 			len = MLEN;
703 			m = m->m_next = newm;
704 		}
705 	}
706 
707 	return (m0);
708 
709 bad:
710 	m_freem(m0);
711 	return (0);
712 }
713 
714 /*
715  * Pass packets to higher levels.
716  */
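/*
 * Layout of the receive buffer as decoded below (inferred from the code,
 * not from a formal spec): a 2-byte big-endian length, followed by that
 * many bytes of frame (ethernet header, the SE_PREFIX bytes when running
 * promiscuous, and the trailing CRC), repeated until a zero length or the
 * end of the buffer.
 */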
717 static int
718 se_read(struct se_softc *sc, char *data, int datalen)
719 {
720 	struct mbuf *m;
721 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
722 	int n;
723 
724 	n = 0;
725 	while (datalen >= 2) {
726 		int len = _2btol(data);
727 		data += 2;
728 		datalen -= 2;
729 
730 		if (len == 0)
731 			break;
732 #ifdef SEDEBUG
733 		if (sc->sc_debug) {
734 			printf("se_read: datalen = %d, packetlen = %d, proto = 0x%04x\n", datalen, len,
735 			 ntohs(((struct ether_header *)data)->ether_type));
736 		}
737 #endif
738 		if (len <= sizeof(struct ether_header) ||
739 		    len > MAX_SNAP) {
740 #ifdef SEDEBUG
741 			printf("%s: invalid packet size %d; dropping\n",
742 			       device_xname(sc->sc_dev), len);
743 #endif
744 			if_statinc(ifp, if_ierrors);
745 			goto next_packet;
746 		}
747 
748 		/* Don't need crc. Must keep ether header for BPF */
749 		m = se_get(sc, data, len - ETHER_CRC);
750 		if (m == 0) {
751 #ifdef SEDEBUG
752 			if (sc->sc_debug)
753 				printf("se_read: se_get returned null\n");
754 #endif
755 			if_statinc(ifp, if_ierrors);
756 			goto next_packet;
757 		}
758 		if ((ifp->if_flags & IFF_PROMISC) != 0) {
759 			m_adj(m, SE_PREFIX);
760 		}
761 
762 		/* Pass the packet up. */
763 		if_percpuq_enqueue(sc->sc_ipq, m);
764 
765 	next_packet:
766 		data += len;
767 		datalen -= len;
768 		n++;
769 	}
770 	return (n);
771 }
772 
773 #if 0
774 static void
775 sewatchdog(struct ifnet *ifp)
776 {
777 	struct se_softc *sc = ifp->if_softc;
778 
779 	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
780 	if_statinc(ifp, if_oerrors);
781 
782 	se_reset(sc);
783 }
784 
785 static void
786 se_reset(struct se_softc *sc)
787 {
788 #if 0
789 	/* Maybe we don't *really* want to reset the entire bus
790 	 * because the ctron isn't working. We would like to send a
791 	 * "BUS DEVICE RESET" message, but don't think the ctron
792 	 * understands it.
793 	 */
794 	se_scsipi_cmd(sc->sc_periph, 0, 0, 0, 0, SERETRIES, 2000, NULL,
795 	    XS_CTL_RESET);
796 #endif
797 	se_init(sc);
798 }
799 #endif
800 
801 static int
802 se_add_proto(struct se_softc *sc, int proto)
803 {
804 	int error;
805 	struct scsi_ctron_ether_generic add_proto_cmd;
806 	uint8_t data[2];
807 	_lto2b(proto, data);
808 #ifdef SEDEBUG
809 	if (sc->sc_debug)
810 		printf("se: adding proto 0x%02x%02x\n", data[0], data[1]);
811 #endif
812 
813 	PROTOCMD(ctron_ether_add_proto, add_proto_cmd);
814 	_lto2b(sizeof(data), add_proto_cmd.length);
815 	error = se_scsipi_cmd(sc->sc_periph,
816 	    (void *)&add_proto_cmd, sizeof(add_proto_cmd),
817 	    data, sizeof(data), SERETRIES, SETIMEOUT, NULL,
818 	    XS_CTL_DATA_OUT);
819 	return (error);
820 }
821 
822 static int
823 se_get_addr(struct se_softc *sc, uint8_t *myaddr)
824 {
825 	int error;
826 	struct scsi_ctron_ether_generic get_addr_cmd;
827 
828 	PROTOCMD(ctron_ether_get_addr, get_addr_cmd);
829 	_lto2b(ETHER_ADDR_LEN, get_addr_cmd.length);
830 	error = se_scsipi_cmd(sc->sc_periph,
831 	    (void *)&get_addr_cmd, sizeof(get_addr_cmd),
832 	    myaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
833 	    XS_CTL_DATA_IN);
834 	printf("%s: ethernet address %s\n", device_xname(sc->sc_dev),
835 	    ether_sprintf(myaddr));
836 	return (error);
837 }
838 
839 
840 static int
841 se_set_media(struct se_softc *sc, int type)
842 {
843 	int error;
844 	struct scsi_ctron_ether_generic set_media_cmd;
845 
846 	PROTOCMD(ctron_ether_set_media, set_media_cmd);
847 	set_media_cmd.byte3 = type;
848 	error = se_scsipi_cmd(sc->sc_periph,
849 	    (void *)&set_media_cmd, sizeof(set_media_cmd),
850 	    0, 0, SERETRIES, SETIMEOUT, NULL, 0);
851 	return (error);
852 }
853 
854 static int
855 se_set_mode(struct se_softc *sc, int len, int mode)
856 {
857 	int error;
858 	struct scsi_ctron_ether_set_mode set_mode_cmd;
859 
860 	PROTOCMD(ctron_ether_set_mode, set_mode_cmd);
861 	set_mode_cmd.mode = mode;
862 	_lto2b(len, set_mode_cmd.length);
863 	error = se_scsipi_cmd(sc->sc_periph,
864 	    (void *)&set_mode_cmd, sizeof(set_mode_cmd),
865 	    0, 0, SERETRIES, SETIMEOUT, NULL, 0);
866 	return (error);
867 }
868 
869 
870 static int
871 se_init(struct se_softc *sc)
872 {
873 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
874 	struct scsi_ctron_ether_generic set_addr_cmd;
875 	uint8_t enaddr[ETHER_ADDR_LEN];
876 	int error;
877 
878 	if (ifp->if_flags & IFF_PROMISC) {
879 		error = se_set_mode(sc, MAX_SNAP, 1);
880 	}
881 	else
882 		error = se_set_mode(sc, ETHERMTU + sizeof(struct ether_header),
883 		    0);
884 	if (error != 0)
885 		return (error);
886 
887 	PROTOCMD(ctron_ether_set_addr, set_addr_cmd);
888 	_lto2b(ETHER_ADDR_LEN, set_addr_cmd.length);
889 	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
890 	error = se_scsipi_cmd(sc->sc_periph,
891 	    (void *)&set_addr_cmd, sizeof(set_addr_cmd),
892 	    enaddr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL,
893 	    XS_CTL_DATA_OUT);
894 	if (error != 0)
895 		return (error);
896 
897 	if ((sc->protos & PROTO_IP) &&
898 	    (error = se_add_proto(sc, ETHERTYPE_IP)) != 0)
899 		return (error);
900 	if ((sc->protos & PROTO_ARP) &&
901 	    (error = se_add_proto(sc, ETHERTYPE_ARP)) != 0)
902 		return (error);
903 	if ((sc->protos & PROTO_REVARP) &&
904 	    (error = se_add_proto(sc, ETHERTYPE_REVARP)) != 0)
905 		return (error);
906 #ifdef NETATALK
907 	if ((sc->protos & PROTO_AT) &&
908 	    (error = se_add_proto(sc, ETHERTYPE_ATALK)) != 0)
909 		return (error);
910 	if ((sc->protos & PROTO_AARP) &&
911 	    (error = se_add_proto(sc, ETHERTYPE_AARP)) != 0)
912 		return (error);
913 #endif
914 
915 	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP)) == IFF_UP) {
916 		ifp->if_flags |= IFF_RUNNING;
917 		mutex_enter(&sc->sc_iflock);
918 		if (!sc->sc_recv_work_pending)  {
919 			sc->sc_recv_work_pending = true;
920 			workqueue_enqueue(sc->sc_recv_wq, &sc->sc_recv_work,
921 			    NULL);
922 		}
923 		mutex_exit(&sc->sc_iflock);
924 		ifp->if_flags &= ~IFF_OACTIVE;
925 		mutex_enter(&sc->sc_iflock);
926 		if (!sc->sc_send_work_pending)  {
927 			sc->sc_send_work_pending = true;
928 			workqueue_enqueue(sc->sc_send_wq, &sc->sc_send_work,
929 			    NULL);
930 		}
931 		mutex_exit(&sc->sc_iflock);
932 	}
933 	return (error);
934 }
935 
936 static int
937 se_set_multi(struct se_softc *sc, uint8_t *addr)
938 {
939 	struct scsi_ctron_ether_generic set_multi_cmd;
940 	int error;
941 
942 	if (sc->sc_debug)
943 		printf("%s: se_set_multi: %s\n", device_xname(sc->sc_dev),
944 		    ether_sprintf(addr));
945 
946 	PROTOCMD(ctron_ether_set_multi, set_multi_cmd);
947 	_lto2b(ETHER_ADDR_LEN, set_multi_cmd.length);
948 	error = se_scsipi_cmd(sc->sc_periph,
949 	    (void *)&set_multi_cmd, sizeof(set_multi_cmd),
950 	    addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
951 	return (error);
952 }
953 
954 static int
955 se_remove_multi(struct se_softc *sc, uint8_t *addr)
956 {
957 	struct scsi_ctron_ether_generic remove_multi_cmd;
958 	int error;
959 
960 	if (sc->sc_debug)
961 		printf("%s: se_remove_multi: %s\n", device_xname(sc->sc_dev),
962 		    ether_sprintf(addr));
963 
964 	PROTOCMD(ctron_ether_remove_multi, remove_multi_cmd);
965 	_lto2b(ETHER_ADDR_LEN, remove_multi_cmd.length);
966 	error = se_scsipi_cmd(sc->sc_periph,
967 	    (void *)&remove_multi_cmd, sizeof(remove_multi_cmd),
968 	    addr, ETHER_ADDR_LEN, SERETRIES, SETIMEOUT, NULL, XS_CTL_DATA_OUT);
969 	return (error);
970 }
971 
972 #if 0	/* not used  --thorpej */
973 static int
974 sc_set_all_multi(struct se_softc *sc, int set)
975 {
976 	int error = 0;
977 	uint8_t *addr;
978 	struct ethercom *ec = &sc->sc_ethercom;
979 	struct ether_multi *enm;
980 	struct ether_multistep step;
981 
982 	ETHER_LOCK(ec);
983 	ETHER_FIRST_MULTI(step, ec, enm);
984 	while (enm != NULL) {
985 		if (ETHER_CMP(enm->enm_addrlo, enm->enm_addrhi)) {
986 			/*
987 			 * We must listen to a range of multicast addresses.
988 			 * For now, just accept all multicasts, rather than
989 			 * trying to set only those filter bits needed to match
990 			 * the range.  (At this time, the only use of address
991 			 * ranges is for IP multicast routing, for which the
992 			 * range is big enough to require all bits set.)
993 			 */
994 			/* We have no way of adding a range to this device.
995 			 * stepping through all addresses in the range is
996 			 * typically not possible. The only real alternative
997 			 * is to go into promiscuous mode and filter by hand.
998 			 */
999 			ETHER_UNLOCK(ec);
1000 			return (ENODEV);
1001 
1002 		}
1003 
1004 		addr = enm->enm_addrlo;
1005 		if ((error = set ? se_set_multi(sc, addr) :
1006 		    se_remove_multi(sc, addr)) != 0)
1007 			return (error);
1008 		ETHER_NEXT_MULTI(step, enm);
1009 	}
1010 	ETHER_UNLOCK(ec);
1011 
1012 	return (error);
1013 }
1014 #endif /* not used */
1015 
1016 static void
1017 se_stop(struct se_softc *sc)
1018 {
1019 
1020 	/* Don't schedule any reads */
1021 	callout_halt(&sc->sc_recv_ch, &sc->sc_iflock);
1022 
1023 	/* Wait for the workqueues to finish */
1024 	mutex_enter(&sc->sc_iflock);
1025 	workqueue_wait(sc->sc_recv_wq, &sc->sc_recv_work);
1026 	workqueue_wait(sc->sc_send_wq, &sc->sc_send_work);
1027 	mutex_exit(&sc->sc_iflock);
1028 
1029 	/* Abort any scsi cmds in progress */
1030 	mutex_enter(chan_mtx(sc->sc_periph->periph_channel));
1031 	scsipi_kill_pending(sc->sc_periph);
1032 	mutex_exit(chan_mtx(sc->sc_periph->periph_channel));
1033 }
1034 
1035 
1036 /*
1037  * Process an ioctl request.
1038  */
1039 static int
1040 se_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1041 {
1042 	struct se_softc *sc = ifp->if_softc;
1043 	struct ifaddr *ifa = (struct ifaddr *)data;
1044 	struct ifreq *ifr = (struct ifreq *)data;
1045 	struct sockaddr *sa;
1046 	int error = 0;
1047 
1048 
1049 	switch (cmd) {
1050 
1051 	case SIOCINITIFADDR:
1052 		mutex_enter(&sc->sc_iflock);
1053 		if ((error = se_enable(sc)) != 0)
1054 			break;
1055 		ifp->if_flags |= IFF_UP;
1056 		mutex_exit(&sc->sc_iflock);
1057 
1058 		if ((error = se_set_media(sc, CMEDIA_AUTOSENSE)) != 0)
1059 			break;
1060 
1061 		switch (ifa->ifa_addr->sa_family) {
1062 #ifdef INET
1063 		case AF_INET:
1064 			sc->protos |= (PROTO_IP | PROTO_ARP | PROTO_REVARP);
1065 			if ((error = se_init(sc)) != 0)
1066 				break;
1067 			arp_ifinit(ifp, ifa);
1068 			break;
1069 #endif
1070 #ifdef NETATALK
1071 		case AF_APPLETALK:
1072 			sc->protos |= (PROTO_AT | PROTO_AARP);
1073 			if ((error = se_init(sc)) != 0)
1074 				break;
1075 			break;
1076 #endif
1077 		default:
1078 			error = se_init(sc);
1079 			break;
1080 		}
1081 		break;
1082 
1083 
1084 	case SIOCSIFFLAGS:
1085 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1086 			break;
1087 		/* XXX re-use ether_ioctl() */
1088 		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1089 		case IFF_RUNNING:
1090 			/*
1091 			 * If interface is marked down and it is running, then
1092 			 * stop it.
1093 			 */
1094 			se_stop(sc);
1095 			mutex_enter(&sc->sc_iflock);
1096 			ifp->if_flags &= ~IFF_RUNNING;
1097 			se_disable(sc);
1098 			mutex_exit(&sc->sc_iflock);
1099 			break;
1100 		case IFF_UP:
1101 			/*
1102 			 * If interface is marked up and it is stopped, then
1103 			 * start it.
1104 			 */
1105 			mutex_enter(&sc->sc_iflock);
1106 			error = se_enable(sc);
1107 			mutex_exit(&sc->sc_iflock);
1108 			if (error)
1109 				break;
1110 			error = se_init(sc);
1111 			break;
1112 		default:
1113 			/*
1114 			 * Reset the interface to pick up changes in any other
1115 			 * flags that affect hardware registers.
1116 			 */
1117 			if (sc->sc_enabled)
1118 				error = se_init(sc);
1119 			break;
1120 		}
1121 #ifdef SEDEBUG
1122 		if (ifp->if_flags & IFF_DEBUG)
1123 			sc->sc_debug = 1;
1124 		else
1125 			sc->sc_debug = 0;
1126 #endif
1127 		break;
1128 
1129 	case SIOCADDMULTI:
1130 	case SIOCDELMULTI:
1131 		mutex_enter(&sc->sc_iflock);
1132 		sa = sockaddr_dup(ifreq_getaddr(cmd, ifr), M_WAITOK);
1133 		mutex_exit(&sc->sc_iflock);
1134 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
1135 			if (ifp->if_flags & IFF_RUNNING) {
1136 				error = (cmd == SIOCADDMULTI) ?
1137 				   se_set_multi(sc, sa->sa_data) :
1138 				   se_remove_multi(sc, sa->sa_data);
1139 			} else
1140 				error = 0;
1141 		}
1142 		mutex_enter(&sc->sc_iflock);
1143 		sockaddr_free(sa);
1144 		mutex_exit(&sc->sc_iflock);
1145 		break;
1146 
1147 	default:
1148 
1149 		error = ether_ioctl(ifp, cmd, data);
1150 		break;
1151 	}
1152 
1153 	return (error);
1154 }
1155 
1156 /*
1157  * Enable the network interface.
1158  */
1159 int
1160 se_enable(struct se_softc *sc)
1161 {
1162 	struct scsipi_periph *periph = sc->sc_periph;
1163 	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1164 	int error = 0;
1165 
1166 	if (sc->sc_enabled == 0) {
1167 		if ((error = scsipi_adapter_addref(adapt)) == 0)
1168 			sc->sc_enabled = 1;
1169 		else
1170 			aprint_error_dev(sc->sc_dev, "device enable failed\n");
1171 	}
1172 	return (error);
1173 }
1174 
1175 /*
1176  * Disable the network interface.
1177  */
1178 void
1179 se_disable(struct se_softc *sc)
1180 {
1181 	struct scsipi_periph *periph = sc->sc_periph;
1182 	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1183 
1184 	if (sc->sc_enabled != 0) {
1185 		scsipi_adapter_delref(adapt);
1186 		sc->sc_enabled = 0;
1187 	}
1188 }
1189 
1190 #define	SEUNIT(z)	(minor(z))
1191 /*
1192  * open the device.
1193  */
1194 int
1195 seopen(dev_t dev, int flag, int fmt, struct lwp *l)
1196 {
1197 	int unit, error;
1198 	struct se_softc *sc;
1199 	struct scsipi_periph *periph;
1200 	struct scsipi_adapter *adapt;
1201 
1202 	unit = SEUNIT(dev);
1203 	sc = device_lookup_private(&se_cd, unit);
1204 	if (sc == NULL)
1205 		return (ENXIO);
1206 
1207 	periph = sc->sc_periph;
1208 	adapt = periph->periph_channel->chan_adapter;
1209 
1210 	if ((error = scsipi_adapter_addref(adapt)) != 0)
1211 		return (error);
1212 
1213 	SC_DEBUG(periph, SCSIPI_DB1,
1214 	    ("scopen: dev=0x%"PRIx64" (unit %d (of %d))\n", dev, unit,
1215 	    se_cd.cd_ndevs));
1216 
1217 	periph->periph_flags |= PERIPH_OPEN;
1218 
1219 	SC_DEBUG(periph, SCSIPI_DB3, ("open complete\n"));
1220 	return (0);
1221 }
1222 
1223 /*
1224  * close the device.  Only called if we are the LAST
1225  * occurrence of an open device
1226  */
1227 int
1228 seclose(dev_t dev, int flag, int fmt, struct lwp *l)
1229 {
1230 	struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1231 	struct scsipi_periph *periph = sc->sc_periph;
1232 	struct scsipi_adapter *adapt = periph->periph_channel->chan_adapter;
1233 
1234 	SC_DEBUG(sc->sc_periph, SCSIPI_DB1, ("closing\n"));
1235 
1236 	scsipi_wait_drain(periph);
1237 
1238 	scsipi_adapter_delref(adapt);
1239 	periph->periph_flags &= ~PERIPH_OPEN;
1240 
1241 	return (0);
1242 }
1243 
1244 /*
1245  * Perform special action on behalf of the user.
1246  * Only does generic scsi ioctls.
1247  */
1248 int
1249 seioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
1250 {
1251 	struct se_softc *sc = device_lookup_private(&se_cd, SEUNIT(dev));
1252 
1253 	return (scsipi_do_ioctl(sc->sc_periph, dev, cmd, addr, flag, l));
1254 }
1255