/*-
 * Copyright (c) 2009-2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/param.h>

/* Hyper-V requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _HYPERVMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _HYPERVMPATOMICS
#undef MULTIPROCESSOR
#undef _HYPERVMPATOMICS
#endif

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/timetc.h>
#include <sys/task.h>
#include <sys/syslog.h>
#include <sys/socket.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <machine/i82489var.h>

#include <dev/rndvar.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/hypervreg.h>
#include <dev/pv/hypervvar.h>
#include <dev/pv/hypervicreg.h>

struct hv_ic_dev;

#define NKVPPOOLS			4
#define MAXPOOLENTS			1023
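
/*
 * Each KVP pool keeps its entries on a simple list, one pool per Hyper-V
 * KVP pool index (External, Guest, Auto and Guest/Parameters, see
 * kvp_pools[] below).  Entry indices handed out to the host wrap around
 * the MAXPOOLENTS mask.
 */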

struct kvp_entry {
	int				kpe_index;
	uint32_t			kpe_valtype;
	uint8_t				kpe_key[HV_KVP_MAX_KEY_SIZE / 2];
	uint8_t				kpe_val[HV_KVP_MAX_VAL_SIZE / 2];
	TAILQ_ENTRY(kvp_entry)		kpe_entry;
};
TAILQ_HEAD(kvp_list, kvp_entry);

struct kvp_pool {
	struct kvp_list			kvp_entries;
	struct mutex			kvp_lock;
	u_int				kvp_index;
};

struct pool				kvp_entry_pool;

struct hv_kvp {
	struct kvp_pool			kvp_pool[NKVPPOOLS];
};

int	hv_heartbeat_attach(struct hv_ic_dev *);
void	hv_heartbeat(void *);
int	hv_kvp_attach(struct hv_ic_dev *);
void	hv_kvp(void *);
int	hv_kvop(void *, int, char *, char *, size_t);
int	hv_shutdown_attach(struct hv_ic_dev *);
void	hv_shutdown(void *);
int	hv_timesync_attach(struct hv_ic_dev *);
void	hv_timesync(void *);

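/*
 * Integration Components (IC) supported by this driver: each entry pairs
 * a service GUID with its attach routine and channel handler.
 * hv_attach_icdevs() matches these against the channels offered by the host.
 */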
static struct hv_ic_dev {
	const char		 *dv_name;
	const struct hv_guid	 *dv_type;
	int			(*dv_attach)(struct hv_ic_dev *);
	void			(*dv_handler)(void *);
	struct hv_channel	 *dv_ch;
	uint8_t			 *dv_buf;
	void			 *dv_priv;
} hv_ic_devs[] = {
	{
		"heartbeat",
		&hv_guid_heartbeat,
		hv_heartbeat_attach,
		hv_heartbeat
	},
	{
		"kvp",
		&hv_guid_kvp,
		hv_kvp_attach,
		hv_kvp
	},
	{
		"shutdown",
		&hv_guid_shutdown,
		hv_shutdown_attach,
		hv_shutdown
	},
	{
		"timesync",
		&hv_guid_timesync,
		hv_timesync_attach,
		hv_timesync
	}
};

static const struct {
	enum hv_kvp_pool		 poolidx;
	const char			*poolname;
	size_t				 poolnamelen;
} kvp_pools[] = {
	{ HV_KVP_POOL_EXTERNAL,		"External",	sizeof("External") },
	{ HV_KVP_POOL_GUEST,		"Guest",	sizeof("Guest")	},
	{ HV_KVP_POOL_AUTO,		"Auto",		sizeof("Auto") },
	{ HV_KVP_POOL_AUTO_EXTERNAL,	"Guest/Parameters",
	  sizeof("Guest/Parameters") }
};

static const struct {
	int				 keyidx;
	const char			*keyname;
	const char			*value;
} kvp_pool_auto[] = {
	{ 0, "FullyQualifiedDomainName",	hostname },
	{ 1, "IntegrationServicesVersion",	"6.6.6"	},
	{ 2, "NetworkAddressIPv4",		"127.0.0.1" },
	{ 3, "NetworkAddressIPv6",		"::1" },
	{ 4, "OSBuildNumber",			osversion },
	{ 5, "OSName",				ostype },
	{ 6, "OSMajorVersion",			"6" }, /* free commit for mike */
	{ 7, "OSMinorVersion",			&osrelease[2] },
	{ 8, "OSVersion",			osrelease },
#ifdef __amd64__ /* As specified in SYSTEM_INFO.wProcessorArchitecture */
	{ 9, "ProcessorArchitecture",		"9" }
#else
	{ 9, "ProcessorArchitecture",		"0" }
#endif
};

void
hv_attach_icdevs(struct hv_softc *sc)
{
	struct hv_ic_dev *dv;
	struct hv_channel *ch;
	int i, header = 0;

	for (i = 0; i < nitems(hv_ic_devs); i++) {
		dv = &hv_ic_devs[i];

		TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
			if (ch->ch_state != HV_CHANSTATE_OFFERED)
				continue;
			if (ch->ch_flags & CHF_MONITOR)
				continue;
			if (memcmp(dv->dv_type, &ch->ch_type,
			    sizeof(ch->ch_type)) == 0)
				break;
		}
		if (ch == NULL)
			continue;

		dv->dv_ch = ch;

		/*
		 * These services are not performance critical and
		 * do not need batched reading. Furthermore, some
		 * services such as KVP can only handle one message
		 * from the host at a time.
		 */
		dv->dv_ch->ch_flags &= ~CHF_BATCHED;

		if (dv->dv_attach && dv->dv_attach(dv) != 0)
			continue;

		if (hv_channel_open(ch, VMBUS_IC_BUFRINGSIZE, NULL, 0,
		    dv->dv_handler, dv)) {
			printf("%s: failed to open channel for %s\n",
			    sc->sc_dev.dv_xname, dv->dv_name);
			continue;
		}
		evcount_attach(&ch->ch_evcnt, dv->dv_name, &sc->sc_idtvec);

		if (!header) {
			printf("%s: %s", sc->sc_dev.dv_xname, dv->dv_name);
			header = 1;
		} else
			printf(", %s", dv->dv_name);
	}
	if (header)
		printf("\n");
}

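/*
 * Pick the IC framework and message versions to respond with.  The request
 * carries the framework versions supported by the host followed by the
 * message versions in ic_ver[]; the highest proposal of each kind that does
 * not exceed what we pass in fwver/msgver is chosen and the reply is
 * rewritten in place with exactly one framework and one message version.
 */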
static inline void
hv_ic_negotiate(struct vmbus_icmsg_hdr *hdr, uint32_t *rlen, uint32_t fwver,
    uint32_t msgver)
{
	struct vmbus_icmsg_negotiate *msg;
	uint16_t propmin, propmaj, chosenmaj, chosenmin;
	int i;

	msg = (struct vmbus_icmsg_negotiate *)hdr;

	chosenmaj = chosenmin = 0;
	for (i = 0; i < msg->ic_fwver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(fwver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(fwver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	fwver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	chosenmaj = chosenmin = 0;
	for (; i < msg->ic_fwver_cnt + msg->ic_msgver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(msgver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(msgver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	msgver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	msg->ic_fwver_cnt = 1;
	msg->ic_ver[0] = fwver;
	msg->ic_msgver_cnt = 1;
	msg->ic_ver[1] = msgver;
	hdr->ic_dsize = sizeof(*msg) + 2 * sizeof(uint32_t) -
	    sizeof(struct vmbus_icmsg_hdr);
	if (*rlen < sizeof(*msg) + 2 * sizeof(uint32_t))
		*rlen = sizeof(*msg) + 2 * sizeof(uint32_t);
}

int
hv_heartbeat_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	return (0);
}

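/*
 * The host periodically sends a heartbeat message carrying a sequence
 * number; echoing it back incremented tells the host the guest is alive.
 */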
void
hv_heartbeat(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_heartbeat *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: heartbeat rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: heartbeat short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_HEARTBEAT:
		msg = (struct vmbus_icmsg_heartbeat *)hdr;
		msg->ic_seq += 1;
		break;
	default:
		printf("%s: unhandled heartbeat message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}
	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);
}

static void
hv_shutdown_task(void *arg)
{
	struct hv_softc *sc = arg;
	pvbus_shutdown(&sc->sc_dev);
}

int
hv_shutdown_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	task_set(&sc->sc_sdtask, hv_shutdown_task, sc);

	return (0);
}

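/*
 * Handle a shutdown request from the host.  Halt flag values 0 and 1 ask
 * for a clean shutdown, which is acknowledged and then handed off to
 * pvbus_shutdown() via the system task queue.
 */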
void
hv_shutdown(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_shutdown *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv, shutdown = 0;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: shutdown rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: shutdown short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_SHUTDOWN:
		msg = (struct vmbus_icmsg_shutdown *)hdr;
		if (msg->ic_haltflags == 0 || msg->ic_haltflags == 1) {
			shutdown = 1;
			hdr->ic_status = VMBUS_ICMSG_STATUS_OK;
		} else
			hdr->ic_status = VMBUS_ICMSG_STATUS_FAIL;
		break;
	default:
		printf("%s: unhandled shutdown message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);

	if (shutdown)
		task_add(systq, &sc->sc_sdtask);
}

int
hv_timesync_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));

	sc->sc_sensor.type = SENSOR_TIMEDELTA;
	sc->sc_sensor.status = SENSOR_S_UNKNOWN;

	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
	sensordev_install(&sc->sc_sensordev);

	return (0);
}

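/*
 * Time synchronization: the host supplies its clock in ic_hvtime as a
 * count of 100ns units since January 1, 1601 (the Windows FILETIME epoch).
 * Subtracting 116444736000000000 rebases it to the Unix epoch; the
 * difference between the guest and host clocks is then exported through
 * the timedelta sensor.
 */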
void
hv_timesync(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_timesync *msg;
	struct timespec guest, host, diff;
	uint64_t tns;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: timesync rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: timesync short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_TIMESYNC:
		msg = (struct vmbus_icmsg_timesync *)hdr;
		if (msg->ic_tsflags == VMBUS_ICMSG_TS_FLAG_SAMPLE) {
			microtime(&sc->sc_sensor.tv);
			nanotime(&guest);
			tns = (msg->ic_hvtime - 116444736000000000LL) * 100;
			host.tv_sec = tns / 1000000000LL;
			host.tv_nsec = tns % 1000000000LL;
			timespecsub(&guest, &host, &diff);
			sc->sc_sensor.value = (int64_t)diff.tv_sec *
			    1000000000LL + diff.tv_nsec;
			sc->sc_sensor.status = SENSOR_S_OK;
		}
		break;
	default:
		printf("%s: unhandled timesync message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);
}

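/*
 * KVP keys and values travel as UTF-16LE strings.  These helpers only
 * deal with the ASCII subset: copyout_utf16le() widens each byte with a
 * zero high byte, while copyin_utf16le() and keycmp_utf16le() simply drop
 * the high byte of every code unit.
 */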
static inline int
copyout_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen * 2);

	for (i = j = 0; i < slen; i++, j += 2) {
		dp[j] = sp[i];
		dp[j + 1] = '\0';
	}
	return (j);
}

static inline int
copyin_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen / 2);

	for (i = j = 0; i < slen; i += 2, j++)
		dp[j] = sp[i];
	return (j);
}

static inline int
keycmp_utf16le(const uint8_t *key, const uint8_t *ukey, size_t ukeylen)
{
	int i, j;

	for (i = j = 0; i < ukeylen; i += 2, j++) {
		if (key[j] != ukey[i])
			return (key[j] > ukey[i] ?
			    key[j] - ukey[i] :
			    ukey[i] - key[j]);
	}
	return (0);
}

static void
kvp_pool_init(struct kvp_pool *kvpl)
{
	TAILQ_INIT(&kvpl->kvp_entries);
	mtx_init(&kvpl->kvp_lock, IPL_NET);
	kvpl->kvp_index = 0;
}

static int
kvp_pool_insert(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0) {
			mtx_leave(&kvpl->kvp_lock);
			return (EEXIST);
		}
	}

	kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOMEM);
	}

	strlcpy(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2);

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

	TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_update(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_import(struct kvp_pool *kvpl, const char *key, uint32_t keylen,
    const char *val, uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;

	if (keylen > HV_KVP_MAX_KEY_SIZE ||
	    vallen > HV_KVP_MAX_VAL_SIZE)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
		if (kpe == NULL) {
			mtx_leave(&kvpl->kvp_lock);
			return (ENOMEM);
		}

		copyin_utf16le(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2,
		    keylen);

		kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

		TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);
	}

	copyin_utf16le(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2, vallen);
	kpe->kpe_valtype = valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_export(struct kvp_pool *kvpl, uint32_t index, char *key,
    uint32_t *keylen, char *val, uint32_t *vallen, uint32_t *valtype)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (kpe->kpe_index == index)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	*keylen = copyout_utf16le(key, kpe->kpe_key, HV_KVP_MAX_KEY_SIZE,
	    strlen(kpe->kpe_key) + 1);
	*vallen = copyout_utf16le(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE,
	    strlen(kpe->kpe_val) + 1);
	*valtype = kpe->kpe_valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_remove(struct kvp_pool *kvpl, const char *key, uint32_t keylen)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	TAILQ_REMOVE(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	pool_put(&kvp_entry_pool, kpe);

	return (0);
}

static int
kvp_pool_extract(struct kvp_pool *kvpl, const char *key, char *val,
    uint32_t vallen)
{
	struct kvp_entry *kpe;

	if (vallen < HV_KVP_MAX_VAL_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	switch (kpe->kpe_valtype) {
	case HV_KVP_REG_SZ:
		strlcpy(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE / 2);
		break;
	case HV_KVP_REG_U32:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%u",
		    *(uint32_t *)kpe->kpe_val);
		break;
	case HV_KVP_REG_U64:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%llu",
		    *(uint64_t *)kpe->kpe_val);
		break;
	}

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_keys(struct kvp_pool *kvpl, int next, char *key, size_t *keylen)
{
	struct kvp_entry *kpe;
	int iter = 0;

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (iter++ < next)
			continue;
		*keylen = strlen(kpe->kpe_key) + 1;
		strlcpy(key, kpe->kpe_key, *keylen);
		return (0);
	}

	return (-1);
}

int
hv_kvp_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp;
	int i;

	dv->dv_buf = malloc(2 * PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	dv->dv_priv = malloc(sizeof(struct hv_kvp), M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_priv == NULL) {
		free(dv->dv_buf, M_DEVBUF, 2 * PAGE_SIZE);
		printf("%s: failed to allocate KVP private data\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	kvp = dv->dv_priv;

	pool_init(&kvp_entry_pool, sizeof(struct kvp_entry), 0, IPL_NET, 0,
	    "hvkvpl", NULL);

	for (i = 0; i < NKVPPOOLS; i++)
		kvp_pool_init(&kvp->kvp_pool[i]);

	/* Initialize 'Auto' pool */
	for (i = 0; i < nitems(kvp_pool_auto); i++) {
		if (kvp_pool_insert(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvp_pool_auto[i].keyname, kvp_pool_auto[i].value,
		    strlen(kvp_pool_auto[i].value), HV_KVP_REG_SZ))
			DPRINTF("%s: failed to insert into 'Auto' pool\n",
			    sc->sc_dev.dv_xname);
	}

	sc->sc_pvbus->hv_kvop = hv_kvop;
	sc->sc_pvbus->hv_arg = dv;

	return (0);
}

static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

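/*
 * Look up the interface matching the MAC address supplied by the host and
 * report one of its addresses.  The MAC arrives as a UTF-16LE string, so
 * each octet takes two hex digit code units plus a separator code unit,
 * i.e. six bytes, which is the stride used when parsing it below.
 */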
static int
kvp_get_ip_info(struct hv_kvp *kvp, const uint8_t *mac, uint8_t *family,
    uint8_t *addr, uint8_t *netmask, size_t addrlen)
{
	struct ifnet *ifp;
	struct ifaddr *ifa, *ifa6, *ifa6ll;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sa6;
	uint8_t	enaddr[ETHER_ADDR_LEN];
	uint8_t ipaddr[INET6_ADDRSTRLEN];
	int i, j, lo, hi, s, af;

	/* Convert from the UTF-16LE string format to binary */
	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 6) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+2])) == -1)
			return (-1);
		enaddr[j++] = hi << 4 | lo;
	}

	switch (*family) {
	case ADDR_FAMILY_NONE:
		af = AF_UNSPEC;
		break;
	case ADDR_FAMILY_IPV4:
		af = AF_INET;
		break;
	case ADDR_FAMILY_IPV6:
		af = AF_INET6;
		break;
	default:
		return (-1);
	}

	KERNEL_LOCK();
	s = splnet();

	TAILQ_FOREACH(ifp, &ifnet, if_list) {
		if (!memcmp(LLADDR(ifp->if_sadl), enaddr, ETHER_ADDR_LEN))
			break;
	}
	if (ifp == NULL) {
		splx(s);
		KERNEL_UNLOCK();
		return (-1);
	}

	ifa6 = ifa6ll = NULL;

	/* Try to find a best matching address, preferring IPv4 */
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
		/*
		 * The first IPv4 address is always the best match unless
		 * we were asked for an IPv6 address.
		 */
		if ((af == AF_INET || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET)) {
			af = AF_INET;
			goto found;
		}
		if ((af == AF_INET6 || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET6)) {
			if (!IN6_IS_ADDR_LINKLOCAL(
			    &satosin6(ifa->ifa_addr)->sin6_addr)) {
				/* Done if we're looking for an IPv6 address */
				if (af == AF_INET6)
					goto found;
				/* Stick to the first one */
				if (ifa6 == NULL)
					ifa6 = ifa;
			} else	/* Pick the last one */
				ifa6ll = ifa;
		}
	}
	/* If we haven't found any IPv4 or IPv6 direct matches... */
	if (ifa == NULL) {
		/* ... try the last global IPv6 address... */
		if (ifa6 != NULL)
			ifa = ifa6;
		/* ... or the last link-local... */
		else if (ifa6ll != NULL)
			ifa = ifa6ll;
		else {
			splx(s);
			KERNEL_UNLOCK();
			return (-1);
		}
	}
 found:
	switch (af) {
	case AF_INET:
		sin = satosin(ifa->ifa_addr);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET_ADDRSTRLEN);

		sin = satosin(ifa->ifa_netmask);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV4;
		break;
	case AF_UNSPEC:
	case AF_INET6:
		sin6 = satosin6(ifa->ifa_addr);
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			sa6 = *satosin6(ifa->ifa_addr);
			sa6.sin6_addr.s6_addr16[1] = 0;
			sin6 = &sa6;
		}
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET6_ADDRSTRLEN);

		sin6 = satosin6(ifa->ifa_netmask);
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET6_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV6;
		break;
	}

	splx(s);
	KERNEL_UNLOCK();

	return (0);
}

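/*
 * Dispatch a host-initiated KVP operation: SET imports a pair into the
 * External or Guest/Parameters pool, DELETE removes one from the External
 * pool, ENUMERATE exports entries from the Auto or Guest pool by index
 * and GET_IP_INFO reports an address for the given MAC (only the pre-5.0
 * message layout is handled).
 */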
static void
hv_kvp_process(struct hv_kvp *kvp, struct vmbus_icmsg_kvp *msg)
{
	union hv_kvp_hdr *kvh = &msg->ic_kvh;
	union hv_kvp_msg *kvm = &msg->ic_kvm;

	switch (kvh->kvh_op) {
	case HV_KVP_OP_SET:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_AUTO_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'Guest/Parameters'"
			    " pool\n", __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool == HV_KVP_POOL_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool != HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvh->kvh_pool != HV_KVP_POOL_EXTERNAL) {
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_DELETE:
		if (kvh->kvh_pool != HV_KVP_POOL_EXTERNAL ||
		    kvp_pool_remove(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_del.kvm_key, kvm->kvm_del.kvm_keylen)) {
			DPRINTF("%s: failed to remove from 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_ENUMERATE:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else if (kvh->kvh_pool == HV_KVP_POOL_GUEST &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_GUEST],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_GET_IP_INFO:
		if (VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver) <= 4) {
			struct vmbus_icmsg_kvp_addr *amsg;
			struct hv_kvp_msg_addr *kva;

			amsg = (struct vmbus_icmsg_kvp_addr *)msg;
			kva = &amsg->ic_kvm;

			if (kvp_get_ip_info(kvp, kva->kvm_mac,
			    &kva->kvm_family, kva->kvm_addr,
			    kva->kvm_netmask, sizeof(kva->kvm_addr)))
				kvh->kvh_err = HV_KVP_S_CONT;
			else
				kvh->kvh_err = HV_KVP_S_OK;
		} else {
			DPRINTF("KVP GET_IP_INFO fw %u.%u msg %u.%u dsize=%u\n",
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_msgver),
			    msg->ic_hdr.ic_dsize);
			kvh->kvh_err = HV_KVP_S_CONT;
		}
		break;
	default:
		DPRINTF("KVP message op %u pool %u\n", kvh->kvh_op,
		    kvh->kvh_pool);
		kvh->kvh_err = HV_KVP_S_CONT;
	}
}

void
hv_kvp(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp = dv->dv_priv;
	struct vmbus_icmsg_hdr *hdr;
	uint64_t rid;
	uint32_t fwver, msgver, rlen;
	int rv;

	for (;;) {
		rv = hv_channel_recv(ch, dv->dv_buf, 2 * PAGE_SIZE,
		    &rlen, &rid, 0);
		if (rv || rlen == 0) {
			if (rv != EAGAIN)
				DPRINTF("%s: kvp rv=%d rlen=%u\n",
				    sc->sc_dev.dv_xname, rv, rlen);
			return;
		}
		if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
			DPRINTF("%s: kvp short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
			return;
		}
		hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
		switch (hdr->ic_type) {
		case VMBUS_ICMSG_TYPE_NEGOTIATE:
			switch (sc->sc_proto) {
			case VMBUS_VERSION_WS2008:
				fwver = VMBUS_IC_VERSION(1, 0);
				msgver = VMBUS_IC_VERSION(1, 0);
				break;
			case VMBUS_VERSION_WIN7:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(3, 0);
				break;
			default:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(4, 0);
			}
			hv_ic_negotiate(hdr, &rlen, fwver, msgver);
			break;
		case VMBUS_ICMSG_TYPE_KVP:
			if (hdr->ic_dsize >= sizeof(union hv_kvp_hdr))
				hv_kvp_process(kvp,
				    (struct vmbus_icmsg_kvp *)hdr);
			else
				printf("%s: message too short: %u\n",
				    sc->sc_dev.dv_xname, hdr->ic_dsize);
			break;
		default:
			printf("%s: unhandled kvp message type %u\n",
			    sc->sc_dev.dv_xname, hdr->ic_type);
			continue;
		}
		hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
		    VMBUS_ICMSG_FLAG_RESPONSE;
		hv_channel_send(ch, dv->dv_buf, rlen, rid,
		    VMBUS_CHANPKT_TYPE_INBAND, 0);
	}
}

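/*
 * Split the pool prefix off a key coming from pvbus(4), e.g. "Guest/foo"
 * selects the Guest pool and leaves the key "foo".  Returns the pool
 * index or -1 if no known pool name matches.
 */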
static int
kvp_poolname(char **key)
{
	char *p;
	int i, rv = -1;

	if ((p = strrchr(*key, '/')) == NULL)
		return (rv);
	*p = '\0';
	for (i = 0; i < nitems(kvp_pools); i++) {
		if (strncasecmp(*key, kvp_pools[i].poolname,
		    kvp_pools[i].poolnamelen) == 0) {
			rv = kvp_pools[i].poolidx;
			break;
		}
	}
	if (rv >= 0)
		*key = ++p;
	return (rv);
}

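/*
 * Key-value store entry point called through pvbus(4).  An empty key
 * enumerates the keys of the selected pool, one per line; writes update
 * the Auto pool or insert into the Guest pool, reads extract a value.
 */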
int
hv_kvop(void *arg, int op, char *key, char *val, size_t vallen)
{
	struct hv_ic_dev *dv = arg;
	struct hv_kvp *kvp = dv->dv_priv;
	struct kvp_pool *kvpl;
	int next, pool, error = 0;
	char *vp = val;
	size_t keylen;

	pool = kvp_poolname(&key);
	if (pool == -1)
		return (EINVAL);

	kvpl = &kvp->kvp_pool[pool];
	if (strlen(key) == 0) {
		for (next = 0; next < MAXPOOLENTS; next++) {
			if ((val + vallen < vp + HV_KVP_MAX_KEY_SIZE / 2) ||
			    kvp_pool_keys(kvpl, next, vp, &keylen))
				goto out;
			if (strlcat(val, "\n", vallen) >= vallen)
				goto out;
			vp += keylen;
		}
 out:
		if (vp > val)
			*(vp - 1) = '\0';
		return (0);
	}

	if (op == PVBUS_KVWRITE) {
		if (pool == HV_KVP_POOL_AUTO)
			error = kvp_pool_update(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else if (pool == HV_KVP_POOL_GUEST)
			error = kvp_pool_insert(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else
			error = EINVAL;
	} else
		error = kvp_pool_extract(kvpl, key, val, vallen);

	return (error);
}