xref: /openbsd-src/sys/dev/pv/hypervic.c (revision d59bb9942320b767f2a19aaa7690c8c6e30b724c)
/*-
 * Copyright (c) 2009-2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/param.h>

/* Hyperv requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _HYPERVMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _HYPERVMPATOMICS
#undef MULTIPROCESSOR
#undef _HYPERVMPATOMICS
#endif
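/*
 * The #define/#undef dance above makes <sys/atomic.h> provide the
 * locked (MP-safe) atomic operations even on single-processor kernels:
 * the hypervisor accesses the shared ring buffers concurrently with
 * the guest, so the unlocked uniprocessor variants are not sufficient.
 */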

#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/timetc.h>
#include <sys/task.h>
#include <sys/syslog.h>
#include <sys/socket.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include <machine/i82489var.h>

#include <dev/rndvar.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/pvreg.h>
#include <dev/pv/hypervreg.h>
#include <dev/pv/hypervvar.h>
#include <dev/pv/hypervicreg.h>

struct hv_ic_dev;

#define NKVPPOOLS			4
#define MAXPOOLENTS			1023

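/*
 * In-kernel key/value store.  Each of the NKVPPOOLS pools (indexed by
 * enum hv_kvp_pool) keeps its entries on a mutex-protected tail queue.
 * Keys and values are stored as plain ASCII strings at half of the
 * on-the-wire UTF-16LE size; entry indices wrap at MAXPOOLENTS.
 */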
struct kvp_entry {
	int				kpe_index;
	uint32_t			kpe_valtype;
	uint8_t				kpe_key[HV_KVP_MAX_KEY_SIZE / 2];
	uint8_t				kpe_val[HV_KVP_MAX_VAL_SIZE / 2];
	TAILQ_ENTRY(kvp_entry)		kpe_entry;
};
TAILQ_HEAD(kvp_list, kvp_entry);

struct kvp_pool {
	struct kvp_list			kvp_entries;
	struct mutex			kvp_lock;
	u_int				kvp_index;
};

struct pool				kvp_entry_pool;

struct hv_kvp {
	struct kvp_pool			kvp_pool[NKVPPOOLS];
};

int	hv_heartbeat_attach(struct hv_ic_dev *);
void	hv_heartbeat(void *);
int	hv_kvp_attach(struct hv_ic_dev *);
void	hv_kvp(void *);
int	hv_kvop(void *, int, char *, char *, size_t);
int	hv_shutdown_attach(struct hv_ic_dev *);
void	hv_shutdown(void *);
int	hv_timesync_attach(struct hv_ic_dev *);
void	hv_timesync(void *);

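/*
 * Integration Components ("IC") service table.  Each entry ties a
 * well-known VMBus channel type GUID to an optional attach routine and
 * a channel callback; hv_attach_icdevs() matches offered channels
 * against this table and opens the ones it finds.
 */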
static struct hv_ic_dev {
	const char		 *dv_name;
	const struct hv_guid	 *dv_type;
	int			(*dv_attach)(struct hv_ic_dev *);
	void			(*dv_handler)(void *);
	struct hv_channel	 *dv_ch;
	uint8_t			 *dv_buf;
	void			 *dv_priv;
} hv_ic_devs[] = {
	{
		"heartbeat",
		&hv_guid_heartbeat,
		hv_heartbeat_attach,
		hv_heartbeat
	},
	{
		"kvp",
		&hv_guid_kvp,
		hv_kvp_attach,
		hv_kvp
	},
	{
		"shutdown",
		&hv_guid_shutdown,
		hv_shutdown_attach,
		hv_shutdown
	},
	{
		"timesync",
		&hv_guid_timesync,
		hv_timesync_attach,
		hv_timesync
	}
};

static const struct {
	enum hv_kvp_pool		 poolidx;
	const char			*poolname;
	size_t				 poolnamelen;
} kvp_pools[] = {
	{ HV_KVP_POOL_EXTERNAL,		"External",	sizeof("External") },
	{ HV_KVP_POOL_GUEST,		"Guest",	sizeof("Guest")	},
	{ HV_KVP_POOL_AUTO,		"Auto",		sizeof("Auto") },
	{ HV_KVP_POOL_AUTO_EXTERNAL,	"Guest/Parameters",
	  sizeof("Guest/Parameters") }
};

static const struct {
	int				 keyidx;
	const char			*keyname;
	const char			*value;
} kvp_pool_auto[] = {
	{ 0, "FullyQualifiedDomainName",	hostname },
	{ 1, "IntegrationServicesVersion",	"6.6.6"	},
	{ 2, "NetworkAddressIPv4",		"127.0.0.1" },
	{ 3, "NetworkAddressIPv6",		"::1" },
	{ 4, "OSBuildNumber",			osversion },
	{ 5, "OSName",				ostype },
	{ 6, "OSMajorVersion",			"6" }, /* free commit for mike */
	{ 7, "OSMinorVersion",			&osrelease[2] },
	{ 8, "OSVersion",			osrelease },
#ifdef __amd64__ /* As specified in SYSTEM_INFO.wProcessorArchitecture */
	{ 9, "ProcessorArchitecture",		"9" }
#else
	{ 9, "ProcessorArchitecture",		"0" }
#endif
};

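/*
 * Match the channels offered by the host against the IC service table
 * and open the ones that are present, hooking up the per-service
 * callbacks and event counters.
 */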
void
hv_attach_icdevs(struct hv_softc *sc)
{
	struct hv_ic_dev *dv;
	struct hv_channel *ch;
	int i, header = 0;

	for (i = 0; i < nitems(hv_ic_devs); i++) {
		dv = &hv_ic_devs[i];

		TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
			if (ch->ch_state != HV_CHANSTATE_OFFERED)
				continue;
			if (ch->ch_flags & CHF_MONITOR)
				continue;
			if (memcmp(dv->dv_type, &ch->ch_type,
			    sizeof(ch->ch_type)) == 0)
				break;
		}
		if (ch == NULL)
			continue;

		dv->dv_ch = ch;

		/*
		 * These services are not performance critical and
		 * do not need batched reading. Furthermore, some
		 * services such as KVP can only handle one message
		 * from the host at a time.
		 */
		dv->dv_ch->ch_flags &= ~CHF_BATCHED;

		if (dv->dv_attach && dv->dv_attach(dv) != 0)
			continue;

		if (hv_channel_open(ch, VMBUS_IC_BUFRINGSIZE, NULL, 0,
		    dv->dv_handler, dv)) {
			printf("%s: failed to open channel for %s\n",
			    sc->sc_dev.dv_xname, dv->dv_name);
			continue;
		}
		evcount_attach(&ch->ch_evcnt, dv->dv_name, &sc->sc_idtvec);

		if (!header) {
			printf("%s: %s", sc->sc_dev.dv_xname, dv->dv_name);
			header = 1;
		} else
			printf(", %s", dv->dv_name);
	}
	if (header)
		printf("\n");
}

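/*
 * Common version negotiation.  The host sends a list of framework
 * versions followed by a list of message versions; pick the highest
 * supported one of each and rewrite the message in place as the reply:
 * ic_ver[0] carries the chosen framework version, ic_ver[1] the chosen
 * message version.
 */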
static inline void
hv_ic_negotiate(struct vmbus_icmsg_hdr *hdr, uint32_t *rlen, uint32_t fwver,
    uint32_t msgver)
{
	struct vmbus_icmsg_negotiate *msg;
	uint16_t propmin, propmaj, chosenmaj, chosenmin;
	int i;

	msg = (struct vmbus_icmsg_negotiate *)hdr;

	chosenmaj = chosenmin = 0;
	for (i = 0; i < msg->ic_fwver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(fwver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(fwver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	fwver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	chosenmaj = chosenmin = 0;
	for (; i < msg->ic_fwver_cnt + msg->ic_msgver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(msgver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(msgver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	msgver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	msg->ic_fwver_cnt = 1;
	msg->ic_ver[0] = fwver;
	msg->ic_msgver_cnt = 1;
	msg->ic_ver[1] = msgver;
	hdr->ic_dsize = sizeof(*msg) + 2 * sizeof(uint32_t) -
	    sizeof(struct vmbus_icmsg_hdr);
	if (*rlen < sizeof(*msg) + 2 * sizeof(uint32_t))
		*rlen = sizeof(*msg) + 2 * sizeof(uint32_t);
}

int
hv_heartbeat_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	return (0);
}

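/*
 * Heartbeat channel callback: read the host's message, bump the
 * sequence counter and send the buffer back so the host knows the
 * guest is alive.
 */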
void
hv_heartbeat(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_heartbeat *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: heartbeat rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: heartbeat short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_HEARTBEAT:
		msg = (struct vmbus_icmsg_heartbeat *)hdr;
		msg->ic_seq += 1;
		break;
	default:
		printf("%s: unhandled heartbeat message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}
	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);
}

static void
hv_shutdown_task(void *arg)
{
	struct hv_softc *sc = arg;
	pvbus_shutdown(&sc->sc_dev);
}

int
hv_shutdown_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	task_set(&sc->sc_sdtask, hv_shutdown_task, sc);

	return (0);
}

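/*
 * Shutdown channel callback.  A halt flags value of 0 or 1 is treated
 * as a clean shutdown request and acknowledged with an OK status; the
 * actual shutdown is deferred to hv_shutdown_task() on the system task
 * queue instead of being run from the channel callback itself.
 */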
void
hv_shutdown(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_shutdown *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv, shutdown = 0;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: shutdown rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: shutdown short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_SHUTDOWN:
		msg = (struct vmbus_icmsg_shutdown *)hdr;
		if (msg->ic_haltflags == 0 || msg->ic_haltflags == 1) {
			shutdown = 1;
			hdr->ic_status = VMBUS_ICMSG_STATUS_OK;
		} else
			hdr->ic_status = VMBUS_ICMSG_STATUS_FAIL;
		break;
	default:
		printf("%s: unhandled shutdown message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);

	if (shutdown)
		task_add(systq, &sc->sc_sdtask);
}

int
hv_timesync_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));

	sc->sc_sensor.type = SENSOR_TIMEDELTA;
	sc->sc_sensor.status = SENSOR_S_UNKNOWN;

	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
	sensordev_install(&sc->sc_sensordev);

	return (0);
}

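/*
 * Timesync channel callback.  The host supplies its clock in Hyper-V
 * format: 100-nanosecond units counted from January 1, 1601 (UTC).
 * Subtracting 116444736000000000 (11644473600 seconds between 1601 and
 * the Unix epoch, expressed in 100 ns units) and scaling by 100 yields
 * nanoseconds since the Unix epoch; the difference between the guest
 * and host clocks is exported as a SENSOR_TIMEDELTA sensor.
 */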
void
hv_timesync(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_timesync *msg;
	struct timespec guest, host, diff;
	uint64_t tns;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: timesync rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: timesync short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_TIMESYNC:
		msg = (struct vmbus_icmsg_timesync *)hdr;
		if (msg->ic_tsflags == VMBUS_ICMSG_TS_FLAG_SAMPLE) {
			microtime(&sc->sc_sensor.tv);
			nanotime(&guest);
			tns = (msg->ic_hvtime - 116444736000000000LL) * 100;
			host.tv_sec = tns / 1000000000LL;
			host.tv_nsec = tns % 1000000000LL;
			timespecsub(&guest, &host, &diff);
			sc->sc_sensor.value = (int64_t)diff.tv_sec *
			    1000000000LL + diff.tv_nsec;
			sc->sc_sensor.status = SENSOR_S_OK;
		}
		break;
	default:
		printf("%s: unhandled timesync message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);
}

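/*
 * The host encodes keys and values as UTF-16LE while the driver only
 * deals with ASCII strings, so conversion amounts to inserting or
 * dropping the zero high bytes; keycmp_utf16le() compares an ASCII key
 * against its UTF-16LE counterpart the same way.
 */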
static inline int
copyout_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen * 2);

	for (i = j = 0; i < slen; i++, j += 2) {
		dp[j] = sp[i];
		dp[j + 1] = '\0';
	}
	return (j);
}

static inline int
copyin_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen / 2);

	for (i = j = 0; i < slen; i += 2, j++)
		dp[j] = sp[i];
	return (j);
}

static inline int
keycmp_utf16le(const uint8_t *key, const uint8_t *ukey, size_t ukeylen)
{
	int i, j;

	for (i = j = 0; i < ukeylen; i += 2, j++) {
		if (key[j] != ukey[i])
			return (key[j] > ukey[i] ?
			    key[j] - ukey[i] :
			    ukey[i] - key[j]);
	}
	return (0);
}

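/*
 * kvp_pool_* helpers implement the per-pool stores.  The _insert,
 * _update, _extract and _keys variants take ASCII keys and are used by
 * the guest-side interface (hv_kvop); _import, _export and _remove
 * operate on the UTF-16LE strings found in host messages.
 */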
static void
kvp_pool_init(struct kvp_pool *kvpl)
{
	TAILQ_INIT(&kvpl->kvp_entries);
	mtx_init(&kvpl->kvp_lock, IPL_NET);
	kvpl->kvp_index = 0;
}

static int
kvp_pool_insert(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0) {
			mtx_leave(&kvpl->kvp_lock);
			return (EEXIST);
		}
	}

	kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOMEM);
	}

	strlcpy(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2);

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

	TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_update(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_import(struct kvp_pool *kvpl, const char *key, uint32_t keylen,
    const char *val, uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;

	if (keylen > HV_KVP_MAX_KEY_SIZE ||
	    vallen > HV_KVP_MAX_VAL_SIZE)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
		if (kpe == NULL) {
			mtx_leave(&kvpl->kvp_lock);
			return (ENOMEM);
		}

		copyin_utf16le(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2,
		    keylen);

		kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

		TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);
	}

	copyin_utf16le(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2, vallen);
	kpe->kpe_valtype = valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_export(struct kvp_pool *kvpl, uint32_t index, char *key,
    uint32_t *keylen, char *val, uint32_t *vallen, uint32_t *valtype)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (kpe->kpe_index == index)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	*keylen = copyout_utf16le(key, kpe->kpe_key, HV_KVP_MAX_KEY_SIZE,
	    strlen(kpe->kpe_key) + 1);
	*vallen = copyout_utf16le(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE,
	    strlen(kpe->kpe_val) + 1);
	*valtype = kpe->kpe_valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_remove(struct kvp_pool *kvpl, const char *key, uint32_t keylen)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	TAILQ_REMOVE(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	pool_put(&kvp_entry_pool, kpe);

	return (0);
}

static int
kvp_pool_extract(struct kvp_pool *kvpl, const char *key, char *val,
    uint32_t vallen)
{
	struct kvp_entry *kpe;

	if (vallen < HV_KVP_MAX_VAL_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	switch (kpe->kpe_valtype) {
	case HV_KVP_REG_SZ:
		strlcpy(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE / 2);
		break;
	case HV_KVP_REG_U32:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%u",
		    *(uint32_t *)kpe->kpe_val);
		break;
	case HV_KVP_REG_U64:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%llu",
		    *(uint64_t *)kpe->kpe_val);
		break;
	}

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_keys(struct kvp_pool *kvpl, int next, char *key, size_t *keylen)
{
	struct kvp_entry *kpe;
	int iter = 0;

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (iter++ < next)
			continue;
		*keylen = strlen(kpe->kpe_key) + 1;
		strlcpy(key, kpe->kpe_key, *keylen);
		return (0);
	}

	return (-1);
}

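/*
 * KVP attach: allocate a two-page receive buffer (KVP messages are
 * larger than those of the other IC services), set up the entry pool
 * and the per-pool lists, preload the "Auto" pool from kvp_pool_auto[]
 * and register hv_kvop() as the key/value hook on the paravirtual bus.
 */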
int
hv_kvp_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp;
	int i;

	dv->dv_buf = malloc(2 * PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	dv->dv_priv = malloc(sizeof(struct hv_kvp), M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_priv == NULL) {
		free(dv->dv_buf, M_DEVBUF, 2 * PAGE_SIZE);
		printf("%s: failed to allocate KVP private data\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	kvp = dv->dv_priv;

	pool_init(&kvp_entry_pool, sizeof(struct kvp_entry), 0, IPL_NET, 0,
	    "hvkvpl", NULL);

	for (i = 0; i < NKVPPOOLS; i++)
		kvp_pool_init(&kvp->kvp_pool[i]);

	/* Initialize 'Auto' pool */
	for (i = 0; i < nitems(kvp_pool_auto); i++) {
		if (kvp_pool_insert(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvp_pool_auto[i].keyname, kvp_pool_auto[i].value,
		    strlen(kvp_pool_auto[i].value), HV_KVP_REG_SZ))
			DPRINTF("%s: failed to insert into 'Auto' pool\n",
			    sc->sc_dev.dv_xname);
	}

	sc->sc_pvbus->hv_kvop = hv_kvop;
	sc->sc_pvbus->hv_arg = dv;

	return (0);
}

static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

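/*
 * Resolve the host's GET_IP_INFO query.  The MAC address arrives as a
 * UTF-16LE hex string; convert it to binary, find the interface with
 * that link-layer address and return a preferred address and netmask
 * (first IPv4 address, otherwise a global IPv6 address, otherwise a
 * link-local one), UTF-16LE encoded for the reply.
 */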
static int
kvp_get_ip_info(struct hv_kvp *kvp, const uint8_t *mac, uint8_t *family,
    uint8_t *addr, uint8_t *netmask, size_t addrlen)
{
	struct ifnet *ifp;
	struct ifaddr *ifa, *ifa6, *ifa6ll;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sa6;
	uint8_t	enaddr[ETHER_ADDR_LEN];
	uint8_t ipaddr[INET6_ADDRSTRLEN];
	int i, j, lo, hi, s, af;

	/* Convert from the UTF-16LE string format to binary */
	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 6) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+2])) == -1)
			return (-1);
		enaddr[j++] = hi << 4 | lo;
	}

	switch (*family) {
	case ADDR_FAMILY_NONE:
		af = AF_UNSPEC;
		break;
	case ADDR_FAMILY_IPV4:
		af = AF_INET;
		break;
	case ADDR_FAMILY_IPV6:
		af = AF_INET6;
		break;
	default:
		/* Unknown address family: don't leave af uninitialized */
		return (-1);
	}

	KERNEL_LOCK();
	s = splnet();

	TAILQ_FOREACH(ifp, &ifnet, if_list) {
		if (!memcmp(LLADDR(ifp->if_sadl), enaddr, ETHER_ADDR_LEN))
			break;
	}
	if (ifp == NULL) {
		splx(s);
		KERNEL_UNLOCK();
		return (-1);
	}

	ifa6 = ifa6ll = NULL;

	/* Try to find a best matching address, preferring IPv4 */
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
		/*
		 * The first IPv4 address is always the best match,
		 * unless we were asked for an IPv6 address.
		 */
		if ((af == AF_INET || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET)) {
			af = AF_INET;
			goto found;
		}
		if ((af == AF_INET6 || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET6)) {
			if (!IN6_IS_ADDR_LINKLOCAL(
			    &satosin6(ifa->ifa_addr)->sin6_addr)) {
				/* Done if we're looking for an IPv6 address */
				if (af == AF_INET6)
					goto found;
				/* Stick to the first one */
				if (ifa6 == NULL)
					ifa6 = ifa;
			} else	/* Pick the last one */
				ifa6ll = ifa;
		}
	}
	/* If we haven't found any IPv4 or IPv6 direct matches... */
	if (ifa == NULL) {
		/* ... try the last global IPv6 address... */
		if (ifa6 != NULL)
			ifa = ifa6;
		/* ... or the last link-local...  */
		else if (ifa6ll != NULL)
			ifa = ifa6ll;
		else {
			splx(s);
			KERNEL_UNLOCK();
			return (-1);
		}
	}
 found:
	switch (af) {
	case AF_INET:
		sin = satosin(ifa->ifa_addr);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET_ADDRSTRLEN);

		sin = satosin(ifa->ifa_netmask);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV4;
		break;
	case AF_UNSPEC:
	case AF_INET6:
		sin6 = satosin6(ifa->ifa_addr);
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			sa6 = *satosin6(ifa->ifa_addr);
			sa6.sin6_addr.s6_addr16[1] = 0;
			sin6 = &sa6;
		}
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET6_ADDRSTRLEN);

		sin6 = satosin6(ifa->ifa_netmask);
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET6_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV6;
		break;
	}

	splx(s);
	KERNEL_UNLOCK();

	return (0);
}

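/*
 * Dispatch a KVP request from the host.  SET and DELETE manipulate the
 * 'External' (and, for SET, 'Guest/Parameters') pools, ENUMERATE walks
 * the 'Auto' and 'Guest' pools by index, and GET_IP_INFO reports the
 * address configuration of the interface named by its MAC address.
 * Failures are signalled back to the host through kvh_err.
 */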
static void
hv_kvp_process(struct hv_kvp *kvp, struct vmbus_icmsg_kvp *msg)
{
	union hv_kvp_hdr *kvh = &msg->ic_kvh;
	union hv_kvp_msg *kvm = &msg->ic_kvm;

	switch (kvh->kvh_op) {
	case HV_KVP_OP_SET:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_AUTO_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'Guest/Parameters'"
			    " pool\n", __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool == HV_KVP_POOL_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool != HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvh->kvh_pool != HV_KVP_POOL_EXTERNAL) {
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_DELETE:
		if (kvh->kvh_pool != HV_KVP_POOL_EXTERNAL ||
		    kvp_pool_remove(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_del.kvm_key, kvm->kvm_del.kvm_keylen)) {
			DPRINTF("%s: failed to remove from 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_ENUMERATE:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else if (kvh->kvh_pool == HV_KVP_POOL_GUEST &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_GUEST],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_GET_IP_INFO:
		if (VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver) <= 4) {
			struct vmbus_icmsg_kvp_addr *amsg;
			struct hv_kvp_msg_addr *kva;

			amsg = (struct vmbus_icmsg_kvp_addr *)msg;
			kva = &amsg->ic_kvm;

			if (kvp_get_ip_info(kvp, kva->kvm_mac,
			    &kva->kvm_family, kva->kvm_addr,
			    kva->kvm_netmask, sizeof(kva->kvm_addr)))
				kvh->kvh_err = HV_KVP_S_CONT;
			else
				kvh->kvh_err = HV_KVP_S_OK;
		} else {
			DPRINTF("KVP GET_IP_INFO fw %u.%u msg %u.%u dsize=%u\n",
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_msgver),
			    msg->ic_hdr.ic_dsize);
			kvh->kvh_err = HV_KVP_S_CONT;
		}
		break;
	default:
		DPRINTF("KVP message op %u pool %u\n", kvh->kvh_op,
		    kvh->kvh_pool);
		kvh->kvh_err = HV_KVP_S_CONT;
	}
}

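/*
 * KVP channel callback.  Unlike the other IC services this one loops,
 * draining every pending message before returning, and the negotiated
 * framework/message versions depend on the VMBus protocol revision
 * agreed on with the host.
 */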
void
hv_kvp(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp = dv->dv_priv;
	struct vmbus_icmsg_hdr *hdr;
	uint64_t rid;
	uint32_t fwver, msgver, rlen;
	int rv;

	for (;;) {
		rv = hv_channel_recv(ch, dv->dv_buf, 2 * PAGE_SIZE,
		    &rlen, &rid, 0);
		if (rv || rlen == 0) {
			if (rv != EAGAIN)
				DPRINTF("%s: kvp rv=%d rlen=%u\n",
				    sc->sc_dev.dv_xname, rv, rlen);
			return;
		}
		if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
			DPRINTF("%s: kvp short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
			return;
		}
		hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
		switch (hdr->ic_type) {
		case VMBUS_ICMSG_TYPE_NEGOTIATE:
			switch (sc->sc_proto) {
			case VMBUS_VERSION_WS2008:
				fwver = VMBUS_IC_VERSION(1, 0);
				msgver = VMBUS_IC_VERSION(1, 0);
				break;
			case VMBUS_VERSION_WIN7:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(3, 0);
				break;
			default:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(4, 0);
			}
			hv_ic_negotiate(hdr, &rlen, fwver, msgver);
			break;
		case VMBUS_ICMSG_TYPE_KVP:
			if (hdr->ic_dsize >= sizeof(union hv_kvp_hdr))
				hv_kvp_process(kvp,
				    (struct vmbus_icmsg_kvp *)hdr);
			else
				printf("%s: message too short: %u\n",
				    sc->sc_dev.dv_xname, hdr->ic_dsize);
			break;
		default:
			printf("%s: unhandled kvp message type %u\n",
			    sc->sc_dev.dv_xname, hdr->ic_type);
			continue;
		}
		hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
		    VMBUS_ICMSG_FLAG_RESPONSE;
		hv_channel_send(ch, dv->dv_buf, rlen, rid,
		    VMBUS_CHANPKT_TYPE_INBAND, 0);
	}
}

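/*
 * Guest-side interface, exported through pvbus(4).  Keys are given as
 * "<pool>/<key>", e.g. "Guest/foo"; kvp_poolname() strips and resolves
 * the pool prefix.  An empty key name enumerates the keys of a pool,
 * writes go to the 'Auto' or 'Guest' pool, anything else is a read.
 */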
static int
kvp_poolname(char **key)
{
	char *p;
	int i, rv = -1;

	if ((p = strrchr(*key, '/')) == NULL)
		return (rv);
	*p = '\0';
	for (i = 0; i < nitems(kvp_pools); i++) {
		if (strncasecmp(*key, kvp_pools[i].poolname,
		    kvp_pools[i].poolnamelen) == 0) {
			rv = kvp_pools[i].poolidx;
			break;
		}
	}
	if (rv >= 0)
		*key = ++p;
	return (rv);
}

int
hv_kvop(void *arg, int op, char *key, char *val, size_t vallen)
{
	struct hv_ic_dev *dv = arg;
	struct hv_kvp *kvp = dv->dv_priv;
	struct kvp_pool *kvpl;
	int next, pool, error = 0;
	char *vp = val;
	size_t keylen;

	pool = kvp_poolname(&key);
	if (pool == -1)
		return (EINVAL);

	kvpl = &kvp->kvp_pool[pool];
	if (strlen(key) == 0) {
		for (next = 0; next < MAXPOOLENTS; next++) {
			if ((val + vallen < vp + HV_KVP_MAX_KEY_SIZE / 2) ||
			    kvp_pool_keys(kvpl, next, vp, &keylen))
				goto out;
			if (strlcat(val, "\n", vallen) >= vallen)
				goto out;
			vp += keylen;
		}
 out:
		if (vp > val)
			*(vp - 1) = '\0';
		return (0);
	}

	if (op == PVBUS_KVWRITE) {
		if (pool == HV_KVP_POOL_AUTO)
			error = kvp_pool_update(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else if (pool == HV_KVP_POOL_GUEST)
			error = kvp_pool_insert(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else
			error = EINVAL;
	} else
		error = kvp_pool_extract(kvpl, key, val, vallen);

	return (error);
}
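
/*
 * Example (sketch, not part of the driver): how another kernel
 * component could read a value through the hook registered in
 * hv_kvp_attach().  The key is prefixed with the pool name and must be
 * writable because kvp_poolname() splits it in place.  The
 * "Guest/Parameters/HostName" key is only a hypothetical entry pushed
 * by the host, "pvbus" stands for a pointer to the pvbus state that
 * holds hv_kvop/hv_arg, and PVBUS_KVREAD is assumed to be the read
 * counterpart of PVBUS_KVWRITE in <dev/pv/pvvar.h>:
 *
 *	char kbuf[] = "Guest/Parameters/HostName";
 *	char vbuf[HV_KVP_MAX_VAL_SIZE / 2];
 *
 *	if (pvbus->hv_kvop != NULL &&
 *	    pvbus->hv_kvop(pvbus->hv_arg, PVBUS_KVREAD, kbuf,
 *	    vbuf, sizeof(vbuf)) == 0)
 *		printf("host name: %s\n", vbuf);
 */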