/*-
 * Copyright (c) 2009-2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/param.h>

/* Hyperv requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _HYPERVMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _HYPERVMPATOMICS
#undef MULTIPROCESSOR
#undef _HYPERVMPATOMICS
#endif

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/timetc.h>
#include <sys/task.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/sensors.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <machine/i82489var.h>

#include <dev/rndvar.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/hypervreg.h>
#include <dev/pv/hypervvar.h>
#include <dev/pv/hypervicreg.h>

struct hv_ic_dev;

#define NKVPPOOLS			4
#define MAXPOOLENTS			1023

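/*
 * Keys and values are kept as nul-terminated ASCII strings, hence
 * half of the UTF-16LE sizes used on the wire by the host.
 */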
struct kvp_entry {
	int				kpe_index;
	uint32_t			kpe_valtype;
	uint8_t				kpe_key[HV_KVP_MAX_KEY_SIZE / 2];
	uint8_t				kpe_val[HV_KVP_MAX_VAL_SIZE / 2];
	TAILQ_ENTRY(kvp_entry)		kpe_entry;
};
TAILQ_HEAD(kvp_list, kvp_entry);

struct kvp_pool {
	struct kvp_list			kvp_entries;
	struct mutex			kvp_lock;
	u_int				kvp_index;
};

struct pool				kvp_entry_pool;

struct hv_kvp {
	struct kvp_pool			kvp_pool[NKVPPOOLS];
};

int	hv_heartbeat_attach(struct hv_ic_dev *);
void	hv_heartbeat(void *);
int	hv_kvp_attach(struct hv_ic_dev *);
void	hv_kvp(void *);
int	hv_kvop(void *, int, char *, char *, size_t);
int	hv_shutdown_attach(struct hv_ic_dev *);
void	hv_shutdown(void *);
int	hv_timesync_attach(struct hv_ic_dev *);
void	hv_timesync(void *);

static struct hv_ic_dev {
	const char		 *dv_name;
	const struct hv_guid	 *dv_type;
	int			(*dv_attach)(struct hv_ic_dev *);
	void			(*dv_handler)(void *);
	struct hv_channel	 *dv_ch;
	uint8_t			 *dv_buf;
	void			 *dv_priv;
} hv_ic_devs[] = {
	{
		"heartbeat",
		&hv_guid_heartbeat,
		hv_heartbeat_attach,
		hv_heartbeat
	},
	{
		"kvp",
		&hv_guid_kvp,
		hv_kvp_attach,
		hv_kvp
	},
	{
		"shutdown",
		&hv_guid_shutdown,
		hv_shutdown_attach,
		hv_shutdown
	},
	{
		"timesync",
		&hv_guid_timesync,
		hv_timesync_attach,
		hv_timesync
	}
};

static const struct {
	enum hv_kvp_pool		 poolidx;
	const char			*poolname;
	size_t				 poolnamelen;
} kvp_pools[] = {
	{ HV_KVP_POOL_EXTERNAL,		"External",	sizeof("External") },
	{ HV_KVP_POOL_GUEST,		"Guest",	sizeof("Guest")	},
	{ HV_KVP_POOL_AUTO,		"Auto",		sizeof("Auto") },
	{ HV_KVP_POOL_AUTO_EXTERNAL,	"Guest/Parameters",
	  sizeof("Guest/Parameters") }
};

static const struct {
	int				 keyidx;
	const char			*keyname;
	const char			*value;
} kvp_pool_auto[] = {
	{ 0, "FullyQualifiedDomainName",	hostname },
	{ 1, "IntegrationServicesVersion",	"6.6.6"	},
	{ 2, "NetworkAddressIPv4",		"127.0.0.1" },
	{ 3, "NetworkAddressIPv6",		"::1" },
	{ 4, "OSBuildNumber",			osversion },
	{ 5, "OSName",				ostype },
	{ 6, "OSMajorVersion",			"6" }, /* free commit for mike */
	{ 7, "OSMinorVersion",			&osrelease[2] },
	{ 8, "OSVersion",			osrelease },
#ifdef __amd64__ /* As specified in SYSTEM_INFO.wProcessorArchitecture */
	{ 9, "ProcessorArchitecture",		"9" }
#else
	{ 9, "ProcessorArchitecture",		"0" }
#endif
};

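/*
 * Walk the offered VMBus channels and bind each integration service
 * listed in hv_ic_devs (heartbeat, KVP, shutdown, timesync) to its
 * channel, then open the channel with the service's handler.
 */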
void
hv_attach_icdevs(struct hv_softc *sc)
{
	struct hv_ic_dev *dv;
	struct hv_channel *ch;
	int i, header = 0;

	for (i = 0; i < nitems(hv_ic_devs); i++) {
		dv = &hv_ic_devs[i];

		TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
			if (ch->ch_state != HV_CHANSTATE_OFFERED)
				continue;
			if (ch->ch_flags & CHF_MONITOR)
				continue;
			if (memcmp(dv->dv_type, &ch->ch_type,
			    sizeof(ch->ch_type)) == 0)
				break;
		}
		if (ch == NULL)
			continue;

		dv->dv_ch = ch;

		/*
		 * These services are not performance critical and
		 * do not need batched reading. Furthermore, some
		 * services such as KVP can only handle one message
		 * from the host at a time.
		 */
		dv->dv_ch->ch_flags &= ~CHF_BATCHED;

		if (dv->dv_attach && dv->dv_attach(dv) != 0)
			continue;

		if (hv_channel_open(ch, VMBUS_IC_BUFRINGSIZE, NULL, 0,
		    dv->dv_handler, dv)) {
			printf("%s: failed to open channel for %s\n",
			    sc->sc_dev.dv_xname, dv->dv_name);
			continue;
		}
		evcount_attach(&ch->ch_evcnt, dv->dv_name, &sc->sc_idtvec);

		if (!header) {
			printf("%s: %s", sc->sc_dev.dv_xname, dv->dv_name);
			header = 1;
		} else
			printf(", %s", dv->dv_name);
	}
	if (header)
		printf("\n");
}

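/*
 * Negotiate framework and message protocol versions: the host sends a
 * list of proposed versions and the guest picks the highest framework
 * and message versions it supports and echoes a single pair back in
 * the reply.
 */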
static inline void
hv_ic_negotiate(struct vmbus_icmsg_hdr *hdr, uint32_t *rlen, uint32_t fwver,
    uint32_t msgver)
{
	struct vmbus_icmsg_negotiate *msg;
	uint16_t propmin, propmaj, chosenmaj, chosenmin;
	int i;

	msg = (struct vmbus_icmsg_negotiate *)hdr;

	chosenmaj = chosenmin = 0;
	for (i = 0; i < msg->ic_fwver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(fwver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(fwver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	fwver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	chosenmaj = chosenmin = 0;
	for (; i < msg->ic_fwver_cnt + msg->ic_msgver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(msgver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(msgver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	msgver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	msg->ic_fwver_cnt = 1;
	msg->ic_ver[0] = fwver;
	msg->ic_msgver_cnt = 1;
	msg->ic_ver[1] = msgver;
	hdr->ic_dsize = sizeof(*msg) + 2 * sizeof(uint32_t) -
	    sizeof(struct vmbus_icmsg_hdr);
	if (*rlen < sizeof(*msg) + 2 * sizeof(uint32_t))
		*rlen = sizeof(*msg) + 2 * sizeof(uint32_t);
}

int
hv_heartbeat_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	return (0);
}

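/*
 * Heartbeat: the host periodically sends a sequence number which the
 * guest increments and echoes back to signal that it is still alive.
 */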
void
hv_heartbeat(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_heartbeat *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: heartbeat rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: heartbeat short read rlen=%u\n",
		    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_HEARTBEAT:
		msg = (struct vmbus_icmsg_heartbeat *)hdr;
		msg->ic_seq += 1;
		break;
	default:
		printf("%s: unhandled heartbeat message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}
	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
	    VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid,
	    VMBUS_CHANPKT_TYPE_INBAND, 0);
}

static void
hv_shutdown_task(void *arg)
{
	struct hv_softc *sc = arg;
	pvbus_shutdown(&sc->sc_dev);
}

int
hv_shutdown_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	task_set(&sc->sc_sdtask, hv_shutdown_task, sc);

	return (0);
}

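/*
 * Shutdown: acknowledge the host's shutdown request and, if it is
 * valid, schedule pvbus_shutdown() to run from the system task queue.
 */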
void
hv_shutdown(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_shutdown *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv, shutdown = 0;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: shutdown rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: shutdown short read rlen=%u\n",
		    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_SHUTDOWN:
		msg = (struct vmbus_icmsg_shutdown *)hdr;
		if (msg->ic_haltflags == 0 || msg->ic_haltflags == 1) {
			shutdown = 1;
			hdr->ic_status = VMBUS_ICMSG_STATUS_OK;
		} else
			hdr->ic_status = VMBUS_ICMSG_STATUS_FAIL;
		break;
	default:
		printf("%s: unhandled shutdown message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
	    VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid,
	    VMBUS_CHANPKT_TYPE_INBAND, 0);

	if (shutdown)
		task_add(systq, &sc->sc_sdtask);
}

int
hv_timesync_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));

	sc->sc_sensor.type = SENSOR_TIMEDELTA;
	sc->sc_sensor.status = SENSOR_S_UNKNOWN;

	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
	sensordev_install(&sc->sc_sensordev);

	return (0);
}

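/*
 * Timesync: whenever the host sends a time sample, record the offset
 * between the guest's and the host's clock in a timedelta sensor.
 */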
void
hv_timesync(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_timesync *msg;
	struct timespec guest, host, diff;
	uint64_t tns;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: timesync rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: timesync short read rlen=%u\n",
		    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_TIMESYNC:
		msg = (struct vmbus_icmsg_timesync *)hdr;
		if (msg->ic_tsflags == VMBUS_ICMSG_TS_FLAG_SAMPLE) {
			microtime(&sc->sc_sensor.tv);
			nanotime(&guest);
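			/*
			 * ic_hvtime is in 100ns units counted from
			 * January 1, 1601 (the Windows FILETIME epoch);
			 * convert it to a Unix timespec.
			 */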
			tns = (msg->ic_hvtime - 116444736000000000LL) * 100;
			host.tv_sec = tns / 1000000000LL;
			host.tv_nsec = tns % 1000000000LL;
			timespecsub(&guest, &host, &diff);
			sc->sc_sensor.value = (int64_t)diff.tv_sec *
			    1000000000LL + diff.tv_nsec;
			sc->sc_sensor.status = SENSOR_S_OK;
		}
		break;
	default:
		printf("%s: unhandled timesync message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
	    VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid,
	    VMBUS_CHANPKT_TYPE_INBAND, 0);
}

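/*
 * Simplistic ASCII <-> UTF-16LE conversion helpers: host strings are
 * UTF-16LE, but only the low byte of each code unit matters for the
 * ASCII keys and values handled here, so the high byte is dropped on
 * input and zeroed on output.
 */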
static inline int
copyout_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen * 2);

	for (i = j = 0; i < slen; i++, j += 2) {
		dp[j] = sp[i];
		dp[j + 1] = '\0';
	}
	return (j);
}

static inline int
copyin_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen / 2);

	for (i = j = 0; i < slen; i += 2, j++)
		dp[j] = sp[i];
	return (j);
}

static inline int
keycmp_utf16le(const uint8_t *key, const uint8_t *ukey, size_t ukeylen)
{
	int i, j;

	for (i = j = 0; i < ukeylen; i += 2, j++) {
		if (key[j] != ukey[i])
			return (key[j] > ukey[i] ?
			    key[j] - ukey[i] :
			    ukey[i] - key[j]);
	}
	return (0);
}

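/*
 * KVP pool management.  kvp_pool_insert, _update, _extract and _keys
 * operate on nul-terminated C strings on behalf of the guest, while
 * _import, _export and _remove deal with the UTF-16LE encoded strings
 * carried in host messages.
 */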
static void
kvp_pool_init(struct kvp_pool *kvpl)
{
	TAILQ_INIT(&kvpl->kvp_entries);
	mtx_init(&kvpl->kvp_lock, IPL_NET);
	kvpl->kvp_index = 0;
}

static int
kvp_pool_insert(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0) {
			mtx_leave(&kvpl->kvp_lock);
			return (EEXIST);
		}
	}

	kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOMEM);
	}

	strlcpy(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2);

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

	TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_update(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_import(struct kvp_pool *kvpl, const char *key, uint32_t keylen,
    const char *val, uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;

	if (keylen > HV_KVP_MAX_KEY_SIZE ||
	    vallen > HV_KVP_MAX_VAL_SIZE)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
		if (kpe == NULL) {
			mtx_leave(&kvpl->kvp_lock);
			return (ENOMEM);
		}

		copyin_utf16le(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2,
		    keylen);

		kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

		TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);
	}

	copyin_utf16le(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2, vallen);
	kpe->kpe_valtype = valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_export(struct kvp_pool *kvpl, uint32_t index, char *key,
    uint32_t *keylen, char *val, uint32_t *vallen, uint32_t *valtype)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (kpe->kpe_index == index)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	*keylen = copyout_utf16le(key, kpe->kpe_key, HV_KVP_MAX_KEY_SIZE,
	    strlen(kpe->kpe_key) + 1);
	*vallen = copyout_utf16le(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE,
	    strlen(kpe->kpe_val) + 1);
	*valtype = kpe->kpe_valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_remove(struct kvp_pool *kvpl, const char *key, uint32_t keylen)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	TAILQ_REMOVE(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	pool_put(&kvp_entry_pool, kpe);

	return (0);
}

static int
kvp_pool_extract(struct kvp_pool *kvpl, const char *key, char *val,
    uint32_t vallen)
{
	struct kvp_entry *kpe;

	if (vallen < HV_KVP_MAX_VAL_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	switch (kpe->kpe_valtype) {
	case HV_KVP_REG_SZ:
		strlcpy(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE / 2);
		break;
	case HV_KVP_REG_U32:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%u",
		    *(uint32_t *)kpe->kpe_val);
		break;
	case HV_KVP_REG_U64:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%llu",
		    *(uint64_t *)kpe->kpe_val);
		break;
	}

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_keys(struct kvp_pool *kvpl, int next, char *key, size_t *keylen)
{
	struct kvp_entry *kpe;
	int iter = 0;

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (iter++ < next)
			continue;
		*keylen = strlen(kpe->kpe_key) + 1;
		strlcpy(key, kpe->kpe_key, *keylen);
		return (0);
	}

	return (-1);
}

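/*
 * Allocate the receive buffer and per-device KVP state, initialize
 * the pools, pre-populate the 'Auto' pool with guest information and
 * register hv_kvop() as the pvbus key/value backend.
 */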
int
hv_kvp_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp;
	int i;

	dv->dv_buf = malloc(2 * PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	dv->dv_priv = malloc(sizeof(struct hv_kvp), M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_priv == NULL) {
		free(dv->dv_buf, M_DEVBUF, 2 * PAGE_SIZE);
		printf("%s: failed to allocate KVP private data\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	kvp = dv->dv_priv;

	pool_init(&kvp_entry_pool, sizeof(struct kvp_entry), 0, IPL_NET, 0,
	    "hvkvpl", NULL);

	for (i = 0; i < NKVPPOOLS; i++)
		kvp_pool_init(&kvp->kvp_pool[i]);

	/* Initialize 'Auto' pool */
	for (i = 0; i < nitems(kvp_pool_auto); i++) {
		if (kvp_pool_insert(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvp_pool_auto[i].keyname, kvp_pool_auto[i].value,
		    strlen(kvp_pool_auto[i].value), HV_KVP_REG_SZ))
			DPRINTF("%s: failed to insert into 'Auto' pool\n",
			    sc->sc_dev.dv_xname);
	}

	sc->sc_pvbus->hv_kvop = hv_kvop;
	sc->sc_pvbus->hv_arg = dv;

	return (0);
}

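/* Convert a single ASCII hex digit into its numeric value. */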
static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

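/*
 * The host hands us a MAC address as a UTF-16LE string of hex digits.
 * Find the interface with that address and report its address and
 * netmask back as UTF-16LE strings, preferring IPv4 unless the host
 * explicitly asked for IPv6.
 */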
static int
kvp_get_ip_info(struct hv_kvp *kvp, const uint8_t *mac, uint8_t *family,
    uint8_t *addr, uint8_t *netmask, size_t addrlen)
{
	struct ifnet *ifp;
	struct ifaddr *ifa, *ifa6, *ifa6ll;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sa6;
	uint8_t	enaddr[ETHER_ADDR_LEN];
	uint8_t ipaddr[INET6_ADDRSTRLEN];
	int i, j, lo, hi, s, af;

	/* Convert from the UTF-16LE string format to binary */
	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 6) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+2])) == -1)
			return (-1);
		enaddr[j++] = hi << 4 | lo;
	}

	switch (*family) {
	case ADDR_FAMILY_NONE:
		af = AF_UNSPEC;
		break;
	case ADDR_FAMILY_IPV4:
		af = AF_INET;
		break;
	case ADDR_FAMILY_IPV6:
		af = AF_INET6;
		break;
	default:
		return (-1);
	}

	KERNEL_LOCK();
	s = splnet();

	TAILQ_FOREACH(ifp, &ifnet, if_list) {
		if (!memcmp(LLADDR(ifp->if_sadl), enaddr, ETHER_ADDR_LEN))
			break;
	}
	if (ifp == NULL) {
		splx(s);
		KERNEL_UNLOCK();
		return (-1);
	}

	ifa6 = ifa6ll = NULL;

	/* Try to find a best matching address, preferring IPv4 */
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
		/*
		 * The first IPv4 address is always the best match unless
		 * we were asked for an IPv6 address.
		 */
		if ((af == AF_INET || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET)) {
			af = AF_INET;
			goto found;
		}
		if ((af == AF_INET6 || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET6)) {
			if (!IN6_IS_ADDR_LINKLOCAL(
			    &satosin6(ifa->ifa_addr)->sin6_addr)) {
				/* Done if we're looking for an IPv6 address */
				if (af == AF_INET6)
					goto found;
				/* Stick to the first one */
				if (ifa6 == NULL)
					ifa6 = ifa;
			} else	/* Pick the last one */
				ifa6ll = ifa;
		}
	}
	/* If we haven't found any IPv4 or IPv6 direct matches... */
	if (ifa == NULL) {
		/* ... try the last global IPv6 address... */
		if (ifa6 != NULL)
			ifa = ifa6;
		/* ... or the last link-local...  */
		else if (ifa6ll != NULL)
			ifa = ifa6ll;
		else {
			splx(s);
			KERNEL_UNLOCK();
			return (-1);
		}
	}
 found:
	switch (af) {
	case AF_INET:
		sin = satosin(ifa->ifa_addr);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET_ADDRSTRLEN);

		sin = satosin(ifa->ifa_netmask);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV4;
		break;
	case AF_UNSPEC:
	case AF_INET6:
		sin6 = satosin6(ifa->ifa_addr);
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			sa6 = *satosin6(ifa->ifa_addr);
			sa6.sin6_addr.s6_addr16[1] = 0;
			sin6 = &sa6;
		}
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET6_ADDRSTRLEN);

		sin6 = satosin6(ifa->ifa_netmask);
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET6_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV6;
		break;
	}

	splx(s);
	KERNEL_UNLOCK();

	return (0);
}

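/*
 * Dispatch a single KVP operation from the host: SET stores a key into
 * the 'Guest/Parameters' or 'External' pool, DELETE removes a key from
 * the 'External' pool, ENUMERATE walks the 'Auto' or 'Guest' pool and
 * GET_IP_INFO reports the address configuration of the interface with
 * the given MAC address.
 */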
static void
hv_kvp_process(struct hv_kvp *kvp, struct vmbus_icmsg_kvp *msg)
{
	union hv_kvp_hdr *kvh = &msg->ic_kvh;
	union hv_kvp_msg *kvm = &msg->ic_kvm;

	switch (kvh->kvh_op) {
	case HV_KVP_OP_SET:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_AUTO_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'Guest/Parameters'"
			    " pool\n", __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool == HV_KVP_POOL_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool != HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvh->kvh_pool != HV_KVP_POOL_EXTERNAL) {
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_DELETE:
		if (kvh->kvh_pool != HV_KVP_POOL_EXTERNAL ||
		    kvp_pool_remove(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_del.kvm_key, kvm->kvm_del.kvm_keylen)) {
			DPRINTF("%s: failed to remove from 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_ENUMERATE:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else if (kvh->kvh_pool == HV_KVP_POOL_GUEST &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_GUEST],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_GET_IP_INFO:
		if (VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver) <= 4) {
			struct vmbus_icmsg_kvp_addr *amsg;
			struct hv_kvp_msg_addr *kva;

			amsg = (struct vmbus_icmsg_kvp_addr *)msg;
			kva = &amsg->ic_kvm;

			if (kvp_get_ip_info(kvp, kva->kvm_mac,
			    &kva->kvm_family, kva->kvm_addr,
			    kva->kvm_netmask, sizeof(kva->kvm_addr)))
				kvh->kvh_err = HV_KVP_S_CONT;
			else
				kvh->kvh_err = HV_KVP_S_OK;
		} else {
			DPRINTF("KVP GET_IP_INFO fw %u.%u msg %u.%u dsize=%u\n",
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_msgver),
			    msg->ic_hdr.ic_dsize);
			kvh->kvh_err = HV_KVP_S_CONT;
		}
		break;
	default:
		DPRINTF("KVP message op %u pool %u\n", kvh->kvh_op,
		    kvh->kvh_pool);
		kvh->kvh_err = HV_KVP_S_CONT;
	}
}

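/*
 * KVP channel handler: keep reading from the ring buffer and
 * answering negotiation and KVP requests until no messages remain.
 */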
void
hv_kvp(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp = dv->dv_priv;
	struct vmbus_icmsg_hdr *hdr;
	uint64_t rid;
	uint32_t fwver, msgver, rlen;
	int rv;

	for (;;) {
		rv = hv_channel_recv(ch, dv->dv_buf, 2 * PAGE_SIZE,
		    &rlen, &rid, 0);
		if (rv || rlen == 0) {
			if (rv != EAGAIN)
				DPRINTF("%s: kvp rv=%d rlen=%u\n",
				    sc->sc_dev.dv_xname, rv, rlen);
			return;
		}
		if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
			DPRINTF("%s: kvp short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
			return;
		}
		hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
		switch (hdr->ic_type) {
		case VMBUS_ICMSG_TYPE_NEGOTIATE:
			switch (sc->sc_proto) {
			case VMBUS_VERSION_WS2008:
				fwver = VMBUS_IC_VERSION(1, 0);
				msgver = VMBUS_IC_VERSION(1, 0);
				break;
			case VMBUS_VERSION_WIN7:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(3, 0);
				break;
			default:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(4, 0);
			}
			hv_ic_negotiate(hdr, &rlen, fwver, msgver);
			break;
		case VMBUS_ICMSG_TYPE_KVP:
			if (hdr->ic_dsize >= sizeof(union hv_kvp_hdr))
				hv_kvp_process(kvp,
				    (struct vmbus_icmsg_kvp *)hdr);
			else
				printf("%s: message too short: %u\n",
				    sc->sc_dev.dv_xname, hdr->ic_dsize);
			break;
		default:
			printf("%s: unhandled kvp message type %u\n",
			    sc->sc_dev.dv_xname, hdr->ic_type);
			continue;
		}
		hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
		    VMBUS_ICMSG_FLAG_RESPONSE;
		hv_channel_send(ch, dv->dv_buf, rlen, rid,
		    VMBUS_CHANPKT_TYPE_INBAND, 0);
	}
}

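/*
 * Keys passed to hv_kvop() are prefixed with a pool name, e.g.
 * "Guest/Foo"; cut off the prefix and return the matching pool index,
 * or -1 if the pool is unknown.
 */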
static int
kvp_poolname(char **key)
{
	char *p;
	int i, rv = -1;

	if ((p = strrchr(*key, '/')) == NULL)
		return (rv);
	*p = '\0';
	for (i = 0; i < nitems(kvp_pools); i++) {
		if (strncasecmp(*key, kvp_pools[i].poolname,
		    kvp_pools[i].poolnamelen) == 0) {
			rv = kvp_pools[i].poolidx;
			break;
		}
	}
	if (rv >= 0)
		*key = ++p;
	return (rv);
}

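/*
 * pvbus key/value interface: an empty key enumerates the keys in the
 * given pool (one per line), PVBUS_KVWRITE updates the 'Auto' pool or
 * inserts into the 'Guest' pool, and any other operation reads back a
 * single value.
 */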
int
hv_kvop(void *arg, int op, char *key, char *val, size_t vallen)
{
	struct hv_ic_dev *dv = arg;
	struct hv_kvp *kvp = dv->dv_priv;
	struct kvp_pool *kvpl;
	int next, pool, error = 0;
	char *vp = val;
	size_t keylen;

	pool = kvp_poolname(&key);
	if (pool == -1)
		return (EINVAL);

	kvpl = &kvp->kvp_pool[pool];
	if (strlen(key) == 0) {
		for (next = 0; next < MAXPOOLENTS; next++) {
			if ((val + vallen < vp + HV_KVP_MAX_KEY_SIZE / 2) ||
			    kvp_pool_keys(kvpl, next, vp, &keylen))
				goto out;
			if (strlcat(val, "\n", vallen) >= vallen)
				goto out;
			vp += keylen;
		}
 out:
		if (vp > val)
			*(vp - 1) = '\0';
		return (0);
	}

	if (op == PVBUS_KVWRITE) {
		if (pool == HV_KVP_POOL_AUTO)
			error = kvp_pool_update(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else if (pool == HV_KVP_POOL_GUEST)
			error = kvp_pool_insert(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else
			error = EINVAL;
	} else
		error = kvp_pool_extract(kvpl, key, val, vallen);

	return (error);
}