/*	$OpenBSD: vmwpvs.c,v 1.13 2015/09/10 18:10:34 deraadt Exp $ */

/*
 * Copyright (c) 2013 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/dkio.h>
#include <sys/task.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_message.h>
#include <scsi/scsiconf.h>

/* pushbuttons */
#define VMWPVS_OPENINGS		64 /* according to the linux driver */
#define VMWPVS_RING_PAGES	2
#define VMWPVS_MAXSGL		(MAXPHYS / PAGE_SIZE)
#define VMWPVS_SENSELEN		roundup(sizeof(struct scsi_sense_data), 16)

/* "chip" definitions */

#define VMWPVS_R_COMMAND	0x0000
#define VMWPVS_R_COMMAND_DATA	0x0004
#define VMWPVS_R_COMMAND_STATUS	0x0008
#define VMWPVS_R_LAST_STS_0	0x0100
#define VMWPVS_R_LAST_STS_1	0x0104
#define VMWPVS_R_LAST_STS_2	0x0108
#define VMWPVS_R_LAST_STS_3	0x010c
#define VMWPVS_R_INTR_STATUS	0x100c
#define VMWPVS_R_INTR_MASK	0x2010
#define VMWPVS_R_KICK_NON_RW_IO	0x3014
#define VMWPVS_R_DEBUG		0x3018
#define VMWPVS_R_KICK_RW_IO	0x4018

#define VMWPVS_INTR_CMPL_0	(1 << 0)
#define VMWPVS_INTR_CMPL_1	(1 << 1)
#define VMWPVS_INTR_CMPL_MASK	(VMWPVS_INTR_CMPL_0 | VMWPVS_INTR_CMPL_1)
#define VMWPVS_INTR_MSG_0	(1 << 2)
#define VMWPVS_INTR_MSG_1	(1 << 3)
#define VMWPVS_INTR_MSG_MASK	(VMWPVS_INTR_MSG_0 | VMWPVS_INTR_MSG_1)
#define VMWPVS_INTR_ALL_MASK	(VMWPVS_INTR_CMPL_MASK | VMWPVS_INTR_MSG_MASK)

#define VMWPVS_PAGE_SHIFT	12
#define VMWPVS_PAGE_SIZE	(1 << VMWPVS_PAGE_SHIFT)

#define VMWPVS_NPG_COMMAND	1
#define VMWPVS_NPG_INTR_STATUS	1
#define VMWPVS_NPG_MISC		2
#define VMWPVS_NPG_KICK_IO	2
#define VMWPVS_NPG_MSI_X	2

#define VMWPVS_PG_COMMAND	0
#define VMWPVS_PG_INTR_STATUS	(VMWPVS_PG_COMMAND + \
				    VMWPVS_NPG_COMMAND * VMWPVS_PAGE_SIZE)
#define VMWPVS_PG_MISC		(VMWPVS_PG_INTR_STATUS + \
				    VMWPVS_NPG_INTR_STATUS * VMWPVS_PAGE_SIZE)
#define VMWPVS_PG_KICK_IO	(VMWPVS_PG_MISC + \
				    VMWPVS_NPG_MISC * VMWPVS_PAGE_SIZE)
#define VMWPVS_PG_MSI_X		(VMWPVS_PG_KICK_IO + \
				    VMWPVS_NPG_KICK_IO * VMWPVS_PAGE_SIZE)
#define VMWPVS_PG_LEN		(VMWPVS_PG_MSI_X + \
				    VMWPVS_NPG_MSI_X * VMWPVS_PAGE_SIZE)

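/*
 * Ring state page shared with the device. The producer and consumer
 * indices are free-running counters; they are reduced modulo the ring
 * entry counts (powers of two) when indexing into the rings.
 */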
struct vmwpvs_ring_state {
	u_int32_t		req_prod;
	u_int32_t		req_cons;
	u_int32_t		req_entries; /* log 2 */

	u_int32_t		cmp_prod;
	u_int32_t		cmp_cons;
	u_int32_t		cmp_entries; /* log 2 */

	u_int32_t		__reserved[26];

	u_int32_t		msg_prod;
	u_int32_t		msg_cons;
	u_int32_t		msg_entries; /* log 2 */
} __packed;

struct vmwpvs_ring_req {
	u_int64_t		context;

	u_int64_t		data_addr;
	u_int64_t		data_len;

	u_int64_t		sense_addr;
	u_int32_t		sense_len;

	u_int32_t		flags;
#define VMWPVS_REQ_SGL			(1 << 0)
#define VMWPVS_REQ_OOBCDB		(1 << 1)
#define VMWPVS_REQ_DIR_NONE		(1 << 2)
#define VMWPVS_REQ_DIR_IN		(1 << 3)
#define VMWPVS_REQ_DIR_OUT		(1 << 4)

	u_int8_t		cdb[16];
	u_int8_t		cdblen;
	u_int8_t		lun[8];
	u_int8_t		tag;
	u_int8_t		bus;
	u_int8_t		target;
	u_int8_t		vcpu_hint;

	u_int8_t		__reserved[59];
} __packed;
#define VMWPVS_REQ_COUNT	((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
				    sizeof(struct vmwpvs_ring_req))

struct vmwpvs_ring_cmp {
	u_int64_t		context;
	u_int64_t		data_len;
	u_int32_t		sense_len;
	u_int16_t		host_status;
	u_int16_t		scsi_status;
	u_int32_t		__reserved[2];
} __packed;
#define VMWPVS_CMP_COUNT	((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
				    sizeof(struct vmwpvs_ring_cmp))

struct vmwpvs_sge {
	u_int64_t		addr;
	u_int32_t		len;
	u_int32_t		flags;
} __packed;

struct vmwpvs_ring_msg {
	u_int32_t		type;
	u_int32_t		__args[31];
} __packed;
#define VMWPVS_MSG_COUNT	((VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) / \
				    sizeof(struct vmwpvs_ring_msg))

#define VMWPVS_MSG_T_ADDED	0
#define VMWPVS_MSG_T_REMOVED	1

struct vmwpvs_ring_msg_dev {
	u_int32_t		type;
	u_int32_t		bus;
	u_int32_t		target;
	u_int8_t		lun[8];

	u_int32_t		__pad[27];
} __packed;

struct vmwpvs_cfg_cmd {
	u_int64_t		cmp_addr;
	u_int32_t		pg_addr;
	u_int32_t		pg_addr_type;
	u_int32_t		pg_num;
	u_int32_t		__reserved;
} __packed;

#define VMWPVS_MAX_RING_PAGES		32
struct vmwpvs_setup_rings_cmd {
	u_int32_t		req_pages;
	u_int32_t		cmp_pages;
	u_int64_t		state_ppn;
	u_int64_t		req_page_ppn[VMWPVS_MAX_RING_PAGES];
	u_int64_t		cmp_page_ppn[VMWPVS_MAX_RING_PAGES];
} __packed;

#define VMWPVS_MAX_MSG_RING_PAGES	16
struct vmwpvs_setup_rings_msg {
	u_int32_t		msg_pages;
	u_int32_t		__reserved;
	u_int64_t		msg_page_ppn[VMWPVS_MAX_MSG_RING_PAGES];
} __packed;

#define VMWPVS_CMD_FIRST		0
#define VMWPVS_CMD_ADAPTER_RESET	1
#define VMWPVS_CMD_ISSUE_SCSI		2
#define VMWPVS_CMD_SETUP_RINGS		3
#define VMWPVS_CMD_RESET_BUS		4
#define VMWPVS_CMD_RESET_DEVICE		5
#define VMWPVS_CMD_ABORT_CMD		6
#define VMWPVS_CMD_CONFIG		7
#define VMWPVS_CMD_SETUP_MSG_RING	8
#define VMWPVS_CMD_DEVICE_UNPLUG	9
#define VMWPVS_CMD_LAST			10

#define VMWPVS_CFGPG_CONTROLLER		0x1958
#define VMWPVS_CFGPG_PHY		0x1959
#define VMWPVS_CFGPG_DEVICE		0x195a

#define VMWPVS_CFGPGADDR_CONTROLLER	0x2120
#define VMWPVS_CFGPGADDR_TARGET		0x2121
#define VMWPVS_CFGPGADDR_PHY		0x2122

struct vmwpvs_cfg_pg_header {
	u_int32_t		pg_num;
	u_int16_t		num_dwords;
	u_int16_t		host_status;
	u_int16_t		scsi_status;
	u_int16_t		__reserved[3];
} __packed;

#define VMWPVS_HOST_STATUS_SUCCESS	0x00
#define VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED 0x0a
#define VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED_WITH_FLAG 0x0b
#define VMWPVS_HOST_STATUS_UNDERRUN	0x0c
#define VMWPVS_HOST_STATUS_SELTIMEOUT	0x11
#define VMWPVS_HOST_STATUS_DATARUN	0x12
#define VMWPVS_HOST_STATUS_BUSFREE	0x13
#define VMWPVS_HOST_STATUS_INVPHASE	0x14
#define VMWPVS_HOST_STATUS_LUNMISMATCH	0x17
#define VMWPVS_HOST_STATUS_INVPARAM	0x1a
#define VMWPVS_HOST_STATUS_SENSEFAILED	0x1b
#define VMWPVS_HOST_STATUS_TAGREJECT	0x1c
#define VMWPVS_HOST_STATUS_BADMSG	0x1d
#define VMWPVS_HOST_STATUS_HAHARDWARE	0x20
#define VMWPVS_HOST_STATUS_NORESPONSE	0x21
#define VMWPVS_HOST_STATUS_SENT_RST	0x22
#define VMWPVS_HOST_STATUS_RECV_RST	0x23
#define VMWPVS_HOST_STATUS_DISCONNECT	0x24
#define VMWPVS_HOST_STATUS_BUS_RESET	0x25
#define VMWPVS_HOST_STATUS_ABORT_QUEUE	0x26
#define VMWPVS_HOST_STATUS_HA_SOFTWARE	0x27
#define VMWPVS_HOST_STATUS_HA_TIMEOUT	0x30
#define VMWPVS_HOST_STATUS_SCSI_PARITY	0x34

#define VMWPVS_SCSI_STATUS_OK		0x00
#define VMWPVS_SCSI_STATUS_CHECK	0x02

struct vmwpvs_cfg_pg_controller {
	struct vmwpvs_cfg_pg_header header;

	u_int64_t		wwnn;
	u_int16_t		manufacturer[64];
	u_int16_t		serial_number[64];
	u_int16_t		oprom_version[32];
	u_int16_t		hardware_version[32];
	u_int16_t		firmware_version[32];
	u_int32_t		num_phys;
	u_int8_t		use_consec_phy_wwns;
	u_int8_t		__reserved[3];
} __packed;

/* driver stuff */

struct vmwpvs_dmamem {
	bus_dmamap_t		dm_map;
	bus_dma_segment_t	dm_seg;
	size_t			dm_size;
	caddr_t			dm_kva;
};
#define VMWPVS_DMA_MAP(_dm)	(_dm)->dm_map
#define VMWPVS_DMA_DVA(_dm)	(_dm)->dm_map->dm_segs[0].ds_addr
#define VMWPVS_DMA_KVA(_dm)	(void *)(_dm)->dm_kva

struct vmwpvs_sgl {
	struct vmwpvs_sge	list[VMWPVS_MAXSGL];
} __packed;

struct vmwpvs_ccb {
	SIMPLEQ_ENTRY(vmwpvs_ccb)
				ccb_entry;

	bus_dmamap_t		ccb_dmamap;
	struct scsi_xfer	*ccb_xs;
	u_int64_t		ccb_ctx;

	struct vmwpvs_sgl	*ccb_sgl;
	bus_addr_t		ccb_sgl_offset;

	void			*ccb_sense;
	bus_addr_t		ccb_sense_offset;
};
SIMPLEQ_HEAD(vmwpvs_ccb_list, vmwpvs_ccb);

struct vmwpvs_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct vmwpvs_dmamem	*sc_req_ring;
	struct vmwpvs_dmamem	*sc_cmp_ring;
	struct vmwpvs_dmamem	*sc_msg_ring;
	struct vmwpvs_dmamem	*sc_ring_state;
	struct mutex		sc_ring_mtx;

	struct vmwpvs_dmamem	*sc_sgls;
	struct vmwpvs_dmamem	*sc_sense;
	struct vmwpvs_ccb	*sc_ccbs;
	struct vmwpvs_ccb_list	sc_ccb_list;
	struct mutex		sc_ccb_mtx;

	void			*sc_ih;

	struct task		sc_msg_task;

	u_int			sc_bus_width;

	struct scsi_link	sc_link;
	struct scsi_iopool	sc_iopool;
	struct scsibus_softc	*sc_scsibus;
};
#define DEVNAME(_s)		((_s)->sc_dev.dv_xname)

int	vmwpvs_match(struct device *, void *, void *);
void	vmwpvs_attach(struct device *, struct device *, void *);

int	vmwpvs_intx(void *);
int	vmwpvs_intr(void *);

#define vmwpvs_read(_s, _r) \
	bus_space_read_4((_s)->sc_iot, (_s)->sc_ioh, (_r))
#define vmwpvs_write(_s, _r, _v) \
	bus_space_write_4((_s)->sc_iot, (_s)->sc_ioh, (_r), (_v))
#define vmwpvs_barrier(_s, _r, _l, _d) \
	bus_space_barrier((_s)->sc_iot, (_s)->sc_ioh, (_r), (_l), (_d))

struct cfattach vmwpvs_ca = {
	sizeof(struct vmwpvs_softc),
	vmwpvs_match,
	vmwpvs_attach,
	NULL
};

struct cfdriver vmwpvs_cd = {
	NULL,
	"vmwpvs",
	DV_DULL
};

void		vmwpvs_scsi_cmd(struct scsi_xfer *);

struct scsi_adapter vmwpvs_switch = {
	vmwpvs_scsi_cmd,
	scsi_minphys,
	NULL,
	NULL,
	NULL
};

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

void		vmwpvs_ccb_put(void *, void *);
void *		vmwpvs_ccb_get(void *);

struct vmwpvs_dmamem *
		vmwpvs_dmamem_alloc(struct vmwpvs_softc *, size_t);
struct vmwpvs_dmamem *
		vmwpvs_dmamem_zalloc(struct vmwpvs_softc *, size_t);
void		vmwpvs_dmamem_free(struct vmwpvs_softc *,
		    struct vmwpvs_dmamem *);

void		vmwpvs_cmd(struct vmwpvs_softc *, u_int32_t, void *, size_t);
int		vmwpvs_get_config(struct vmwpvs_softc *);
void		vmwpvs_setup_rings(struct vmwpvs_softc *);
void		vmwpvs_setup_msg_ring(struct vmwpvs_softc *);
void		vmwpvs_msg_task(void *);

struct vmwpvs_ccb *
		vmwpvs_scsi_cmd_poll(struct vmwpvs_softc *);
struct vmwpvs_ccb *
		vmwpvs_scsi_cmd_done(struct vmwpvs_softc *,
		    struct vmwpvs_ring_cmp *);

int
vmwpvs_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VMWARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VMWARE_PVSCSI)
		return (1);

	return (0);
}

void
vmwpvs_attach(struct device *parent, struct device *self, void *aux)
{
	struct vmwpvs_softc *sc = (struct vmwpvs_softc *)self;
	struct pci_attach_args *pa = aux;
	struct scsibus_attach_args saa;
	pcireg_t memtype;
	u_int i, r, use_msg;
	int (*isr)(void *) = vmwpvs_intx;
	u_int32_t intmask;
	pci_intr_handle_t ih;

	struct vmwpvs_ccb *ccb;
	struct vmwpvs_sgl *sgls;
	u_int8_t *sense;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	sc->sc_bus_width = 16;
	mtx_init(&sc->sc_ring_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	task_set(&sc->sc_msg_task, vmwpvs_msg_task, sc);
	SIMPLEQ_INIT(&sc->sc_ccb_list);

	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
			break;
	}
	if (r >= PCI_MAPREG_END) {
		printf(": unable to locate registers\n");
		return;
	}

	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
	    NULL, &sc->sc_ios, VMWPVS_PG_LEN) != 0) {
		printf(": unable to map registers\n");
		return;
	}

	/* hook up the interrupt */
	vmwpvs_write(sc, VMWPVS_R_INTR_MASK, 0);

	if (pci_intr_map_msi(pa, &ih) == 0)
		isr = vmwpvs_intr;
	else if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}
	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));

	/* do we have msg support? */
	vmwpvs_write(sc, VMWPVS_R_COMMAND, VMWPVS_CMD_SETUP_MSG_RING);
	use_msg = (vmwpvs_read(sc, VMWPVS_R_COMMAND_STATUS) != 0xffffffff);

	if (vmwpvs_get_config(sc) != 0) {
		printf("%s: get configuration failed\n", DEVNAME(sc));
		goto unmap;
	}

	sc->sc_ring_state = vmwpvs_dmamem_zalloc(sc, VMWPVS_PAGE_SIZE);
	if (sc->sc_ring_state == NULL) {
		printf("%s: unable to allocate ring state\n", DEVNAME(sc));
		goto unmap;
	}

	sc->sc_req_ring = vmwpvs_dmamem_zalloc(sc,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
	if (sc->sc_req_ring == NULL) {
		printf("%s: unable to allocate req ring\n", DEVNAME(sc));
		goto free_ring_state;
	}

	sc->sc_cmp_ring = vmwpvs_dmamem_zalloc(sc,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
	if (sc->sc_cmp_ring == NULL) {
		printf("%s: unable to allocate cmp ring\n", DEVNAME(sc));
		goto free_req_ring;
	}

	if (use_msg) {
		sc->sc_msg_ring = vmwpvs_dmamem_zalloc(sc,
		    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE);
		if (sc->sc_msg_ring == NULL) {
			printf("%s: unable to allocate msg ring\n",
			    DEVNAME(sc));
			goto free_cmp_ring;
		}
	}

	r = (VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE) /
	    sizeof(struct vmwpvs_ring_req);

	sc->sc_sgls = vmwpvs_dmamem_alloc(sc, r * sizeof(struct vmwpvs_sgl));
	if (sc->sc_sgls == NULL) {
		printf("%s: unable to allocate sgls\n", DEVNAME(sc));
		goto free_msg_ring;
	}

	sc->sc_sense = vmwpvs_dmamem_alloc(sc, r * VMWPVS_SENSELEN);
	if (sc->sc_sense == NULL) {
		printf("%s: unable to allocate sense data\n", DEVNAME(sc));
		goto free_sgl;
	}

	sc->sc_ccbs = mallocarray(r, sizeof(struct vmwpvs_ccb),
	    M_DEVBUF, M_WAITOK);
	/* can't fail: M_WAITOK */

	sgls = VMWPVS_DMA_KVA(sc->sc_sgls);
	sense = VMWPVS_DMA_KVA(sc->sc_sense);
	for (i = 0; i < r; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    VMWPVS_MAXSGL, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create ccb map\n", DEVNAME(sc));
			goto free_ccbs;
		}

		ccb->ccb_ctx = 0xdeadbeef00000000ULL | (u_int64_t)i;

		ccb->ccb_sgl_offset = i * sizeof(*sgls);
		ccb->ccb_sgl = &sgls[i];

		ccb->ccb_sense_offset = i * VMWPVS_SENSELEN;
		ccb->ccb_sense = sense + ccb->ccb_sense_offset;

		vmwpvs_ccb_put(sc, ccb);
	}

	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
	    isr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL)
		goto free_ccbs;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREWRITE);
	if (use_msg) {
		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
		    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);
	}
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	intmask = VMWPVS_INTR_CMPL_MASK;

	vmwpvs_setup_rings(sc);
	if (use_msg) {
		vmwpvs_setup_msg_ring(sc);
		intmask |= VMWPVS_INTR_MSG_MASK;
	}

	vmwpvs_write(sc, VMWPVS_R_INTR_MASK, intmask);

	/* controller init is done, let's plug the midlayer in */

	scsi_iopool_init(&sc->sc_iopool, sc, vmwpvs_ccb_get, vmwpvs_ccb_put);

	sc->sc_link.adapter = &vmwpvs_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = -1;
	sc->sc_link.adapter_buswidth = sc->sc_bus_width;
	sc->sc_link.openings = VMWPVS_OPENINGS;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);

	return;
free_ccbs:
	while ((ccb = vmwpvs_ccb_get(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	free(sc->sc_ccbs, M_DEVBUF, r * sizeof(struct vmwpvs_ccb));
/* free_sense: */
	vmwpvs_dmamem_free(sc, sc->sc_sense);
free_sgl:
	vmwpvs_dmamem_free(sc, sc->sc_sgls);
free_msg_ring:
	if (use_msg)
		vmwpvs_dmamem_free(sc, sc->sc_msg_ring);
free_cmp_ring:
	vmwpvs_dmamem_free(sc, sc->sc_cmp_ring);
free_req_ring:
	vmwpvs_dmamem_free(sc, sc->sc_req_ring);
free_ring_state:
	vmwpvs_dmamem_free(sc, sc->sc_ring_state);
unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

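/*
 * Hand the request/completion rings and the shared state page to the
 * device. The rings are described as lists of physical page numbers.
 */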
void
vmwpvs_setup_rings(struct vmwpvs_softc *sc)
{
	struct vmwpvs_setup_rings_cmd cmd;
	u_int64_t ppn;
	u_int i;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req_pages = VMWPVS_RING_PAGES;
	cmd.cmp_pages = VMWPVS_RING_PAGES;
	cmd.state_ppn = VMWPVS_DMA_DVA(sc->sc_ring_state) >> VMWPVS_PAGE_SHIFT;

	ppn = VMWPVS_DMA_DVA(sc->sc_req_ring) >> VMWPVS_PAGE_SHIFT;
	for (i = 0; i < VMWPVS_RING_PAGES; i++)
		cmd.req_page_ppn[i] = ppn + i;

	ppn = VMWPVS_DMA_DVA(sc->sc_cmp_ring) >> VMWPVS_PAGE_SHIFT;
	for (i = 0; i < VMWPVS_RING_PAGES; i++)
		cmd.cmp_page_ppn[i] = ppn + i;

	vmwpvs_cmd(sc, VMWPVS_CMD_SETUP_RINGS, &cmd, sizeof(cmd));
}

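/*
 * As above, but for the optional message ring the device uses for
 * hotplug notifications.
 */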
void
vmwpvs_setup_msg_ring(struct vmwpvs_softc *sc)
{
	struct vmwpvs_setup_rings_msg cmd;
	u_int64_t ppn;
	u_int i;

	memset(&cmd, 0, sizeof(cmd));
	cmd.msg_pages = VMWPVS_RING_PAGES;

	ppn = VMWPVS_DMA_DVA(sc->sc_msg_ring) >> VMWPVS_PAGE_SHIFT;
	for (i = 0; i < VMWPVS_RING_PAGES; i++)
		cmd.msg_page_ppn[i] = ppn + i;

	vmwpvs_cmd(sc, VMWPVS_CMD_SETUP_MSG_RING, &cmd, sizeof(cmd));
}

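/*
 * Read the controller config page to find out how many targets the
 * adapter supports. The status fields are poisoned first so a device
 * that ignores the command is caught by the check below.
 */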
int
vmwpvs_get_config(struct vmwpvs_softc *sc)
{
	struct vmwpvs_cfg_cmd cmd;
	struct vmwpvs_dmamem *dm;
	struct vmwpvs_cfg_pg_controller *pg;
	struct vmwpvs_cfg_pg_header *hdr;
	int rv = 0;

	dm = vmwpvs_dmamem_alloc(sc, VMWPVS_PAGE_SIZE);
	if (dm == NULL)
		return (ENOMEM);

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmp_addr = VMWPVS_DMA_DVA(dm);
	cmd.pg_addr_type = VMWPVS_CFGPGADDR_CONTROLLER;
	cmd.pg_num = VMWPVS_CFGPG_CONTROLLER;

	pg = VMWPVS_DMA_KVA(dm);
	memset(pg, 0, VMWPVS_PAGE_SIZE);
	hdr = &pg->header;
	hdr->host_status = VMWPVS_HOST_STATUS_INVPARAM;
	hdr->scsi_status = VMWPVS_SCSI_STATUS_CHECK;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(dm), 0, VMWPVS_PAGE_SIZE,
	    BUS_DMASYNC_PREREAD);
	vmwpvs_cmd(sc, VMWPVS_CMD_CONFIG, &cmd, sizeof(cmd));
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(dm), 0, VMWPVS_PAGE_SIZE,
	    BUS_DMASYNC_POSTREAD);

	if (hdr->host_status != VMWPVS_HOST_STATUS_SUCCESS ||
	    hdr->scsi_status != VMWPVS_SCSI_STATUS_OK) {
		rv = EIO;
		goto done;
	}

	sc->sc_bus_width = pg->num_phys;

done:
	vmwpvs_dmamem_free(sc, dm);

	return (rv);
}

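/*
 * Commands are issued by writing the command code to the command
 * register, followed by the payload one 32-bit word at a time via the
 * command data register.
 */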
void
vmwpvs_cmd(struct vmwpvs_softc *sc, u_int32_t cmd, void *buf, size_t len)
{
	u_int32_t *p = buf;
	u_int i;

	len /= sizeof(*p);

	vmwpvs_write(sc, VMWPVS_R_COMMAND, cmd);
	for (i = 0; i < len; i++)
		vmwpvs_write(sc, VMWPVS_R_COMMAND_DATA, p[i]);
}

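/*
 * Legacy INTx handler: check and acknowledge the interrupt status
 * register before doing the real work. With MSI the status register
 * does not need to be acked, so vmwpvs_intr() is hooked up directly.
 */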
int
vmwpvs_intx(void *xsc)
{
	struct vmwpvs_softc *sc = xsc;
	u_int32_t status;

	status = vmwpvs_read(sc, VMWPVS_R_INTR_STATUS);
	if ((status & VMWPVS_INTR_ALL_MASK) == 0)
		return (0);

	vmwpvs_write(sc, VMWPVS_R_INTR_STATUS, status);

	return (vmwpvs_intr(sc));
}

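/*
 * Completion processing: snapshot the completion ring producer and
 * consumer indices under sc_ring_mtx, claim all new entries, then
 * complete the associated ccbs outside the mutex. If the message ring
 * has moved as well, defer that work to the system taskq.
 */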
int
vmwpvs_intr(void *xsc)
{
	struct vmwpvs_softc *sc = xsc;
	volatile struct vmwpvs_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_cmp *ring = VMWPVS_DMA_KVA(sc->sc_cmp_ring);
	struct vmwpvs_ccb_list list = SIMPLEQ_HEAD_INITIALIZER(list);
	struct vmwpvs_ccb *ccb;
	u_int32_t cons, prod;
	int msg;

	mtx_enter(&sc->sc_ring_mtx);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = s->cmp_cons;
	prod = s->cmp_prod;
	s->cmp_cons = prod;

	msg = (sc->sc_msg_ring != NULL && s->msg_cons != s->msg_prod);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (cons != prod) {
		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
		    0, VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_POSTREAD);

		do {
			ccb = vmwpvs_scsi_cmd_done(sc,
			    &ring[cons++ % VMWPVS_CMP_COUNT]);
			SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_entry);
		} while (cons != prod);

		bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
		    0, VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_PREREAD);
	}

	mtx_leave(&sc->sc_ring_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_entry);
		scsi_done(ccb->ccb_xs);
	}

	if (msg)
		task_add(systq, &sc->sc_msg_task);

	return (1);
}

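/*
 * Message ring processing, run from the system taskq. The device posts
 * device added/removed messages here; probe or detach the lun
 * accordingly.
 */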
void
vmwpvs_msg_task(void *xsc)
{
	struct vmwpvs_softc *sc = xsc;
	volatile struct vmwpvs_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_msg *ring = VMWPVS_DMA_KVA(sc->sc_msg_ring);
	struct vmwpvs_ring_msg *msg;
	struct vmwpvs_ring_msg_dev *dvmsg;
	u_int32_t cons, prod;

	mtx_enter(&sc->sc_ring_mtx);
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	cons = s->msg_cons;
	prod = s->msg_prod;
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	mtx_leave(&sc->sc_ring_mtx);

	/*
	 * we don't have to lock around the msg ring because the system
	 * taskq has only one thread.
	 */

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD);
	while (cons != prod) {
		msg = &ring[cons++ % VMWPVS_MSG_COUNT];

		switch (letoh32(msg->type)) {
		case VMWPVS_MSG_T_ADDED:
			dvmsg = (struct vmwpvs_ring_msg_dev *)msg;
			if (letoh32(dvmsg->bus) != 0) {
				printf("%s: ignoring request to add device"
				    " on bus %d\n", DEVNAME(sc),
				    letoh32(dvmsg->bus));
				break;
			}

			if (scsi_probe_lun(sc->sc_scsibus,
			    letoh32(dvmsg->target), dvmsg->lun[1]) != 0) {
				printf("%s: error probing target %d lun %d\n",
				    DEVNAME(sc), letoh32(dvmsg->target),
				    dvmsg->lun[1]);
			}
			break;

		case VMWPVS_MSG_T_REMOVED:
			dvmsg = (struct vmwpvs_ring_msg_dev *)msg;
			if (letoh32(dvmsg->bus) != 0) {
				printf("%s: ignoring request to remove device"
				    " on bus %d\n", DEVNAME(sc),
				    letoh32(dvmsg->bus));
				break;
			}

			if (scsi_detach_lun(sc->sc_scsibus,
			    letoh32(dvmsg->target), dvmsg->lun[1],
			    DETACH_FORCE) != 0) {
				printf("%s: error detaching target %d lun %d\n",
				    DEVNAME(sc), letoh32(dvmsg->target),
				    dvmsg->lun[1]);
			}
			break;

		default:
			printf("%s: unknown msg type %u\n", DEVNAME(sc),
			    letoh32(msg->type));
			break;
		}
	}
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_msg_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD);

	mtx_enter(&sc->sc_ring_mtx);
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	s->msg_cons = prod;
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	mtx_leave(&sc->sc_ring_mtx);
}

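/*
 * Queue a SCSI command: load the data map, fill the next request ring
 * slot (using the preallocated SGL when the transfer has more than one
 * segment), bump the producer index and kick the device. Polled
 * commands then spin on the completion ring until this ccb comes back.
 */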
void
vmwpvs_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct vmwpvs_softc *sc = link->adapter_softc;
	struct vmwpvs_ccb *ccb = xs->io;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	volatile struct vmwpvs_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_req *ring = VMWPVS_DMA_KVA(sc->sc_req_ring), *r;
	u_int32_t prod;
	struct vmwpvs_ccb_list list;
	int error;
	u_int i;

	ccb->ccb_xs = xs;

	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap,
		    xs->data, xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_ring_mtx);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	prod = s->req_prod;
	r = &ring[prod % VMWPVS_REQ_COUNT];

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring),
	    (prod % VMWPVS_REQ_COUNT) * sizeof(*r), sizeof(*r),
	    BUS_DMASYNC_POSTWRITE);

	memset(r, 0, sizeof(*r));
	r->context = ccb->ccb_ctx;

	if (xs->datalen > 0) {
		r->data_len = xs->datalen;
		if (dmap->dm_nsegs == 1) {
			r->data_addr = dmap->dm_segs[0].ds_addr;
		} else {
			struct vmwpvs_sge *sgl = ccb->ccb_sgl->list, *sge;

			r->data_addr = VMWPVS_DMA_DVA(sc->sc_sgls) +
			    ccb->ccb_sgl_offset;
			r->flags = VMWPVS_REQ_SGL;

			for (i = 0; i < dmap->dm_nsegs; i++) {
				sge = &sgl[i];
				sge->addr = dmap->dm_segs[i].ds_addr;
				sge->len = dmap->dm_segs[i].ds_len;
				sge->flags = 0;
			}

			bus_dmamap_sync(sc->sc_dmat,
			    VMWPVS_DMA_MAP(sc->sc_sgls), ccb->ccb_sgl_offset,
			    sizeof(*sge) * dmap->dm_nsegs,
			    BUS_DMASYNC_PREWRITE);
		}
	}
	r->sense_addr = VMWPVS_DMA_DVA(sc->sc_sense) + ccb->ccb_sense_offset;
	r->sense_len = sizeof(xs->sense);

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_POSTWRITE);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		r->flags |= VMWPVS_REQ_DIR_IN;
		break;
	case SCSI_DATA_OUT:
		r->flags |= VMWPVS_REQ_DIR_OUT;
		break;
	default:
		r->flags |= VMWPVS_REQ_DIR_NONE;
		break;
	}

	memcpy(r->cdb, xs->cmd, xs->cmdlen);
	r->cdblen = xs->cmdlen;
	r->lun[1] = link->lun; /* ugly :( */
	r->tag = MSG_SIMPLE_Q_TAG;
	r->bus = 0;
	r->target = link->target;
	r->vcpu_hint = 0;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_req_ring), 0,
	    VMWPVS_RING_PAGES * VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREWRITE);

	s->req_prod = prod + 1;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_ring_state), 0,
	    VMWPVS_PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vmwpvs_write(sc, xs->bp == NULL ?
	    VMWPVS_R_KICK_NON_RW_IO : VMWPVS_R_KICK_RW_IO, 0);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_ring_mtx);
		return;
	}

	SIMPLEQ_INIT(&list);
	do {
		ccb = vmwpvs_scsi_cmd_poll(sc);
		SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_entry);
	} while (xs->io != ccb);

	mtx_leave(&sc->sc_ring_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_entry);
		scsi_done(ccb->ccb_xs);
	}
}

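/*
 * Spin on the completion ring until an entry appears. Called with
 * sc_ring_mtx held; completions for other commands may come back
 * first, so the caller collects ccbs until it finds its own.
 */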
struct vmwpvs_ccb *
vmwpvs_scsi_cmd_poll(struct vmwpvs_softc *sc)
{
	volatile struct vmwpvs_ring_state *s =
	    VMWPVS_DMA_KVA(sc->sc_ring_state);
	struct vmwpvs_ring_cmp *ring = VMWPVS_DMA_KVA(sc->sc_cmp_ring);
	struct vmwpvs_ccb *ccb;
	u_int32_t prod, cons;

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat,
		    VMWPVS_DMA_MAP(sc->sc_ring_state), 0, VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cons = s->cmp_cons;
		prod = s->cmp_prod;

		if (cons != prod)
			s->cmp_cons = cons + 1;

		bus_dmamap_sync(sc->sc_dmat,
		    VMWPVS_DMA_MAP(sc->sc_ring_state), 0, VMWPVS_PAGE_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (cons != prod)
			break;
		else
			delay(1000);
	}

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
	    0, VMWPVS_PAGE_SIZE * VMWPVS_RING_PAGES,
	    BUS_DMASYNC_POSTREAD);
	ccb = vmwpvs_scsi_cmd_done(sc, &ring[cons % VMWPVS_CMP_COUNT]);
	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_cmp_ring),
	    0, VMWPVS_PAGE_SIZE * VMWPVS_RING_PAGES,
	    BUS_DMASYNC_PREREAD);

	return (ccb);
}

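/*
 * Translate a completion ring entry back into a ccb: the low 32 bits
 * of the context are the ccb index. Sync and unload the data map and
 * map the host/scsi status onto the midlayer error codes.
 */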
struct vmwpvs_ccb *
vmwpvs_scsi_cmd_done(struct vmwpvs_softc *sc, struct vmwpvs_ring_cmp *c)
{
	u_int64_t ctx = c->context;
	struct vmwpvs_ccb *ccb = &sc->sc_ccbs[ctx & 0xffffffff];
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct scsi_xfer *xs = ccb->ccb_xs;

	bus_dmamap_sync(sc->sc_dmat, VMWPVS_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(xs->sense), BUS_DMASYNC_POSTREAD);

	if (xs->datalen > 0) {
		if (dmap->dm_nsegs > 1) {
			bus_dmamap_sync(sc->sc_dmat,
			    VMWPVS_DMA_MAP(sc->sc_sgls), ccb->ccb_sgl_offset,
			    sizeof(struct vmwpvs_sge) * dmap->dm_nsegs,
			    BUS_DMASYNC_POSTWRITE);
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->status = c->scsi_status;
	switch (c->host_status) {
	case VMWPVS_HOST_STATUS_SUCCESS:
	case VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED:
	case VMWPVS_HOST_STATUS_LINKED_CMD_COMPLETED_WITH_FLAG:
		if (c->scsi_status == VMWPVS_SCSI_STATUS_CHECK) {
			memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
			xs->error = XS_SENSE;
		} else
			xs->error = XS_NOERROR;
		xs->resid = 0;
		break;

	case VMWPVS_HOST_STATUS_UNDERRUN:
	case VMWPVS_HOST_STATUS_DATARUN:
		xs->resid = xs->datalen - c->data_len;
		xs->error = XS_NOERROR;
		break;

	case VMWPVS_HOST_STATUS_SELTIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		printf("%s: %s:%d h:0x%x s:0x%x\n", DEVNAME(sc),
		    __FUNCTION__, __LINE__, c->host_status, c->scsi_status);
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	return (ccb);
}

void *
vmwpvs_ccb_get(void *xsc)
{
	struct vmwpvs_softc *sc = xsc;
	struct vmwpvs_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_list, ccb_entry);
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}

void
vmwpvs_ccb_put(void *xsc, void *io)
{
	struct vmwpvs_softc *sc = xsc;
	struct vmwpvs_ccb *ccb = io;

	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
	mtx_leave(&sc->sc_ccb_mtx);
}

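/*
 * Helpers for single-segment DMA memory: create a map, allocate and
 * map the memory, and load it so both the kva and dva are available.
 */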
struct vmwpvs_dmamem *
vmwpvs_dmamem_alloc(struct vmwpvs_softc *sc, size_t size)
{
	struct vmwpvs_dmamem *dm;
	int nsegs;

	dm = malloc(sizeof(*dm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (dm == NULL)
		return (NULL);

	dm->dm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map) != 0)
		goto dmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &dm->dm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &dm->dm_seg, nsegs, size,
	    &dm->dm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, dm->dm_map, dm->dm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (dm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, dm->dm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &dm->dm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, dm->dm_map);
dmfree:
	free(dm, M_DEVBUF, sizeof *dm);

	return (NULL);
}

struct vmwpvs_dmamem *
vmwpvs_dmamem_zalloc(struct vmwpvs_softc *sc, size_t size)
{
	struct vmwpvs_dmamem *dm;

	dm = vmwpvs_dmamem_alloc(sc, size);
	if (dm == NULL)
		return (NULL);

	memset(VMWPVS_DMA_KVA(dm), 0, size);

	return (dm);
}

void
vmwpvs_dmamem_free(struct vmwpvs_softc *sc, struct vmwpvs_dmamem *dm)
{
	bus_dmamap_unload(sc->sc_dmat, dm->dm_map);
	bus_dmamem_unmap(sc->sc_dmat, dm->dm_kva, dm->dm_size);
	bus_dmamem_free(sc->sc_dmat, &dm->dm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, dm->dm_map);
	free(dm, M_DEVBUF, sizeof *dm);
}
1169