xref: /netbsd-src/sys/external/mit/xen-include-public/dist/xen/include/public/sysctl.h (revision d16b7486a53dcb8072b60ec6fcb4373a2d0c27b7)
1 /******************************************************************************
2  * sysctl.h
3  *
4  * System management operations. For use by node control stack.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Copyright (c) 2002-2006, K Fraser
25  */
26 
27 #ifndef __XEN_PUBLIC_SYSCTL_H__
28 #define __XEN_PUBLIC_SYSCTL_H__
29 
30 #if !defined(__XEN__) && !defined(__XEN_TOOLS__)
31 #error "sysctl operations are intended for use by node control tools only"
32 #endif
33 
34 #include "xen.h"
35 #include "domctl.h"
36 #include "physdev.h"
37 #include "tmem.h"
38 
39 #define XEN_SYSCTL_INTERFACE_VERSION 0x00000011
40 
41 /*
42  * Read console content from Xen buffer ring.
43  */
44 /* XEN_SYSCTL_readconsole */
45 struct xen_sysctl_readconsole {
46     /* IN: Non-zero -> clear after reading. */
47     uint8_t clear;
48     /* IN: Non-zero -> start index specified by @index field. */
49     uint8_t incremental;
    /* Explicit padding so the layout is identical for 32- and 64-bit tools. */
50     uint8_t pad0, pad1;
51     /*
52      * IN:  Start index for consuming from ring buffer (if @incremental);
53      * OUT: End index after consuming from ring buffer.
54      */
55     uint32_t index;
56     /* IN: Virtual address to write console data. */
57     XEN_GUEST_HANDLE_64(char) buffer;
58     /* IN: Size of buffer; OUT: Bytes written to buffer. */
59     uint32_t count;
60 };
61 
62 /* Get trace buffers machine base address */
63 /* XEN_SYSCTL_tbuf_op */
64 struct xen_sysctl_tbuf_op {
65     /* IN variables */
    /* Sub-commands for @cmd below. */
66 #define XEN_SYSCTL_TBUFOP_get_info     0
67 #define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
68 #define XEN_SYSCTL_TBUFOP_set_evt_mask 2
69 #define XEN_SYSCTL_TBUFOP_set_size     3
70 #define XEN_SYSCTL_TBUFOP_enable       4
71 #define XEN_SYSCTL_TBUFOP_disable      5
72     uint32_t cmd;
73     /* IN/OUT variables */
74     struct xenctl_bitmap cpu_mask;   /* CPUs to trace */
75     uint32_t             evt_mask;   /* event classes to trace */
76     /* OUT variables */
77     uint64_aligned_t buffer_mfn;
78     uint32_t size;  /* Also an IN variable! */
79 };
80 
81 /*
82  * Get physical information about the host machine
83  */
84 /* XEN_SYSCTL_physinfo */
85  /* (x86) The platform supports HVM guests. */
86 #define _XEN_SYSCTL_PHYSCAP_hvm          0
87 #define XEN_SYSCTL_PHYSCAP_hvm           (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
88  /* (x86) The platform supports HVM-guest direct access to I/O devices. */
89 #define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
90 #define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
   /* All fields are OUT: a snapshot of host topology, memory and capabilities. */
91 struct xen_sysctl_physinfo {
92     uint32_t threads_per_core;
93     uint32_t cores_per_socket;
94     uint32_t nr_cpus;     /* # CPUs currently online */
95     uint32_t max_cpu_id;  /* Largest possible CPU ID on this host */
96     uint32_t nr_nodes;    /* # nodes currently online */
97     uint32_t max_node_id; /* Largest possible node ID on this host */
98     uint32_t cpu_khz;
99     uint32_t capabilities;/* XEN_SYSCTL_PHYSCAP_??? */
100     uint64_aligned_t total_pages;
101     uint64_aligned_t free_pages;
102     uint64_aligned_t scrub_pages;
103     uint64_aligned_t outstanding_pages;
104     uint64_aligned_t max_mfn; /* Largest possible MFN on this host */
105     uint32_t hw_cap[8]; /* presumably raw CPU feature words -- TODO confirm */
106 };
107 
108 /*
109  * Get the ID of the current scheduler.
110  */
111 /* XEN_SYSCTL_sched_id */
112 struct xen_sysctl_sched_id {
113     /* OUT variable */
114     uint32_t sched_id; /* ID of the scheduler in use (cf. sched_id in cpupool_op) */
115 };
116 
117 /* Interface for controlling Xen software performance counters. */
118 /* XEN_SYSCTL_perfc_op */
119 /* Sub-operations: */
120 #define XEN_SYSCTL_PERFCOP_reset 1   /* Reset all counters to zero. */
121 #define XEN_SYSCTL_PERFCOP_query 2   /* Get perfctr information. */
    /* Descriptor for one counter; a counter may hold several values. */
122 struct xen_sysctl_perfc_desc {
123     char         name[80];             /* name of perf counter */
124     uint32_t     nr_vals;              /* number of values for this counter */
125 };
126 typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
127 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
128 typedef uint32_t xen_sysctl_perfc_val_t;
129 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
130 
131 struct xen_sysctl_perfc_op {
132     /* IN variables. */
133     uint32_t       cmd;                /*  XEN_SYSCTL_PERFCOP_??? */
134     /* OUT variables. */
135     uint32_t       nr_counters;       /*  number of counters description  */
136     uint32_t       nr_vals;           /*  number of values  */
137     /* counter information (or NULL) */
138     XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
139     /* counter values (or NULL) */
140     XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
141 };
142 
143 /* XEN_SYSCTL_getdomaininfolist */
    /* Bulk query: fill @buffer with info for up to @max_domains domains,
     * starting at @first_domain. */
144 struct xen_sysctl_getdomaininfolist {
145     /* IN variables. */
146     domid_t               first_domain;
147     uint32_t              max_domains;
148     XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
149     /* OUT variables. */
150     uint32_t              num_domains; /* entries actually written to @buffer */
151 };
152 
153 /* Inject debug keys into Xen. */
154 /* XEN_SYSCTL_debug_keys */
155 struct xen_sysctl_debug_keys {
156     /* IN variables. */
157     XEN_GUEST_HANDLE_64(char) keys;  /* array of @nr_keys debug-key characters */
158     uint32_t nr_keys;
159 };
160 
161 /* Get physical CPU information. */
162 /* XEN_SYSCTL_getcpuinfo */
163 struct xen_sysctl_cpuinfo {
164     uint64_aligned_t idletime;  /* per-CPU accumulated idle time */
165 };
166 typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
167 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
168 struct xen_sysctl_getcpuinfo {
169     /* IN variables. */
170     uint32_t max_cpus;                          /* capacity of @info array */
171     XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
172     /* OUT variables. */
173     uint32_t nr_cpus;                           /* entries written to @info */
174 };
175 
176 /* XEN_SYSCTL_availheap */
    /* Query free heap space, optionally restricted by address width and node. */
177 struct xen_sysctl_availheap {
178     /* IN variables. */
179     uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
180     uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
181     int32_t  node;          /* NUMA node of interest (-1 for all nodes). */
182     /* OUT variables. */
183     uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
184 };
185 
186 /* XEN_SYSCTL_get_pmstat */
    /* One P-state sample: frequency plus accumulated residency/transitions. */
187 struct pm_px_val {
188     uint64_aligned_t freq;        /* Px core frequency */
189     uint64_aligned_t residency;   /* Px residency time */
190     uint64_aligned_t count;       /* Px transition count */
191 };
192 typedef struct pm_px_val pm_px_val_t;
193 DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);
194 
195 struct pm_px_stat {
196     uint8_t total;        /* total Px states */
197     uint8_t usable;       /* usable Px states */
198     uint8_t last;         /* last Px state */
199     uint8_t cur;          /* current Px state */
200     XEN_GUEST_HANDLE_64(uint64) trans_pt;   /* Px transition table */
201     XEN_GUEST_HANDLE_64(pm_px_val_t) pt;    /* per-state values, @total entries */
202 };
203 
204 struct pm_cx_stat {
205     uint32_t nr;    /* entry nr in triggers & residencies, including C0 */
206     uint32_t last;  /* last Cx state */
207     uint64_aligned_t idle_time;                 /* idle time from boot */
208     XEN_GUEST_HANDLE_64(uint64) triggers;    /* Cx trigger counts */
209     XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
210     uint32_t nr_pc;                          /* entry nr in pc[] */
211     uint32_t nr_cc;                          /* entry nr in cc[] */
212     /*
213      * These two arrays may (and generally will) have unused slots; slots not
214      * having a corresponding hardware register will not be written by the
215      * hypervisor. It is therefore up to the caller to put a suitable sentinel
216      * into all slots before invoking the function.
217      * Indexing is 1-biased (PC1/CC1 being at index 0).
218      */
219     XEN_GUEST_HANDLE_64(uint64) pc;
220     XEN_GUEST_HANDLE_64(uint64) cc;
221 };
222 
223 struct xen_sysctl_get_pmstat {
    /* @type: category in the high nibble (PMSTAT_PX/PMSTAT_CX), sub-op in the low. */
224 #define PMSTAT_CATEGORY_MASK 0xf0
225 #define PMSTAT_PX            0x10
226 #define PMSTAT_CX            0x20
227 #define PMSTAT_get_max_px    (PMSTAT_PX | 0x1)
228 #define PMSTAT_get_pxstat    (PMSTAT_PX | 0x2)
229 #define PMSTAT_reset_pxstat  (PMSTAT_PX | 0x3)
230 #define PMSTAT_get_max_cx    (PMSTAT_CX | 0x1)
231 #define PMSTAT_get_cxstat    (PMSTAT_CX | 0x2)
232 #define PMSTAT_reset_cxstat  (PMSTAT_CX | 0x3)
233     uint32_t type;
234     uint32_t cpuid;
235     union {
236         struct pm_px_stat getpx;
237         struct pm_cx_stat getcx;
238         /* other struct for tx, etc */
239     } u;
240 };
241 
242 /* XEN_SYSCTL_cpu_hotplug */
243 struct xen_sysctl_cpu_hotplug {
244     /* IN variables */
245     uint32_t cpu;   /* Physical cpu. */
246 #define XEN_SYSCTL_CPU_HOTPLUG_ONLINE  0
247 #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
248     uint32_t op;    /* hotplug opcode */
249 };
250 
251 /*
252  * Get/set xen power management, include
253  * 1. cpufreq governors and related parameters
254  */
255 /* XEN_SYSCTL_pm_op */
    /* Tunables of the "userspace" cpufreq governor. */
256 struct xen_userspace {
257     uint32_t scaling_setspeed;
258 };
259 
    /* Tunables of the "ondemand" cpufreq governor. */
260 struct xen_ondemand {
261     uint32_t sampling_rate_max;
262     uint32_t sampling_rate_min;
263 
264     uint32_t sampling_rate;
265     uint32_t up_threshold;
266 };
267 
268 /*
269  * cpufreq para name of this structure named
270  * same as sysfs file name of native linux
271  */
272 #define CPUFREQ_NAME_LEN 16
273 struct xen_get_cpufreq_para {
274     /* IN/OUT variable */
    /* IN: array capacities provided by caller; OUT: counts required/written. */
275     uint32_t cpu_num;
276     uint32_t freq_num;
277     uint32_t gov_num;
278 
279     /* for all governors */
280     /* OUT variable */
281     XEN_GUEST_HANDLE_64(uint32) affected_cpus;
282     XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies;
283     XEN_GUEST_HANDLE_64(char)   scaling_available_governors;
284     char scaling_driver[CPUFREQ_NAME_LEN];
285 
286     uint32_t cpuinfo_cur_freq;
287     uint32_t cpuinfo_max_freq;
288     uint32_t cpuinfo_min_freq;
289     uint32_t scaling_cur_freq;
290 
291     char scaling_governor[CPUFREQ_NAME_LEN];
292     uint32_t scaling_max_freq;
293     uint32_t scaling_min_freq;
294 
295     /* for specific governor */
296     union {
297         struct  xen_userspace userspace;
298         struct  xen_ondemand ondemand;
299     } u;
300 
301     int32_t turbo_enabled;
302 };
303 
    /* Select a cpufreq governor by name (same naming as Linux sysfs). */
304 struct xen_set_cpufreq_gov {
305     char scaling_governor[CPUFREQ_NAME_LEN];
306 };
307 
    /* Set one cpufreq tunable: @ctrl_type selects which, @ctrl_value is the value. */
308 struct xen_set_cpufreq_para {
309     #define SCALING_MAX_FREQ           1
310     #define SCALING_MIN_FREQ           2
311     #define SCALING_SETSPEED           3
312     #define SAMPLING_RATE              4
313     #define UP_THRESHOLD               5
314 
315     uint32_t ctrl_type;
316     uint32_t ctrl_value;
317 };
318 
    /* Power-management control: @cmd selects the operation, @u the matching arg. */
319 struct xen_sysctl_pm_op {
320     #define PM_PARA_CATEGORY_MASK      0xf0
321     #define CPUFREQ_PARA               0x10
322 
323     /* cpufreq command type */
324     #define GET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x01)
325     #define SET_CPUFREQ_GOV            (CPUFREQ_PARA | 0x02)
326     #define SET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x03)
327     #define GET_CPUFREQ_AVGFREQ        (CPUFREQ_PARA | 0x04)
328 
329     /* set/reset scheduler power saving option */
330     #define XEN_SYSCTL_pm_op_set_sched_opt_smt    0x21
331 
332     /* cpuidle max_cstate access command */
333     #define XEN_SYSCTL_pm_op_get_max_cstate       0x22
334     #define XEN_SYSCTL_pm_op_set_max_cstate       0x23
335 
336     /* set scheduler migration cost value */
337     #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay   0x24
338     #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay   0x25
339 
340     /* enable/disable turbo mode when in dbs governor */
341     #define XEN_SYSCTL_pm_op_enable_turbo               0x26
342     #define XEN_SYSCTL_pm_op_disable_turbo              0x27
343 
344     uint32_t cmd;
345     uint32_t cpuid;
346     union {
347         struct xen_get_cpufreq_para get_para;
348         struct xen_set_cpufreq_gov  set_gov;
349         struct xen_set_cpufreq_para set_para;
350         uint64_aligned_t get_avgfreq;
351         uint32_t                    set_sched_opt_smt;
352         uint32_t                    get_max_cstate;
353         uint32_t                    set_max_cstate;
354     } u;
355 };
356 
357 /* XEN_SYSCTL_page_offline_op */
358 struct xen_sysctl_page_offline_op {
359     /* IN: range of page to be offlined */
360 #define sysctl_page_offline     1
361 #define sysctl_page_online      2
362 #define sysctl_query_page_offline  3
363     uint32_t cmd;
364     uint32_t start;
365     uint32_t end;
366     /* OUT: result of page offline request */
367     /*
368      * bit 0~15: result flags
369      * bit 16~31: owner
370      */
371     XEN_GUEST_HANDLE(uint32) status;
372 };
373 
    /* Flag values reported in the per-page 'status' words above. */
374 #define PG_OFFLINE_STATUS_MASK    (0xFFUL)
375 
376 /* The result is invalid, i.e. HV does not handle it */
377 #define PG_OFFLINE_INVALID   (0x1UL << 0)
378 
379 #define PG_OFFLINE_OFFLINED  (0x1UL << 1)
380 #define PG_OFFLINE_PENDING   (0x1UL << 2)
381 #define PG_OFFLINE_FAILED    (0x1UL << 3)
382 #define PG_OFFLINE_AGAIN     (0x1UL << 4)
383 
    /* Online operations reuse the corresponding offline result bits. */
384 #define PG_ONLINE_FAILED     PG_OFFLINE_FAILED
385 #define PG_ONLINE_ONLINED    PG_OFFLINE_OFFLINED
386 
387 #define PG_OFFLINE_STATUS_OFFLINED              (0x1UL << 1)
388 #define PG_OFFLINE_STATUS_ONLINE                (0x1UL << 2)
389 #define PG_OFFLINE_STATUS_OFFLINE_PENDING       (0x1UL << 3)
390 #define PG_OFFLINE_STATUS_BROKEN                (0x1UL << 4)
391 
392 #define PG_OFFLINE_MISC_MASK    (0xFFUL << 4)
393 
394 /* valid when PG_OFFLINE_FAILED or PG_OFFLINE_PENDING */
395 #define PG_OFFLINE_XENPAGE   (0x1UL << 8)
396 #define PG_OFFLINE_DOM0PAGE  (0x1UL << 9)
397 #define PG_OFFLINE_ANONYMOUS (0x1UL << 10)
398 #define PG_OFFLINE_NOT_CONV_RAM   (0x1UL << 11)
399 #define PG_OFFLINE_OWNED     (0x1UL << 12)
400 
401 #define PG_OFFLINE_BROKEN    (0x1UL << 13)
402 #define PG_ONLINE_BROKEN     PG_OFFLINE_BROKEN
403 
404 #define PG_OFFLINE_OWNER_SHIFT 16
405 
406 /* XEN_SYSCTL_lockprof_op */
407 /* Sub-operations: */
408 #define XEN_SYSCTL_LOCKPROF_reset 1   /* Reset all profile data to zero. */
409 #define XEN_SYSCTL_LOCKPROF_query 2   /* Get lock profile information. */
410 /* Record-type: */
411 #define LOCKPROF_TYPE_GLOBAL      0   /* global lock, idx meaningless */
412 #define LOCKPROF_TYPE_PERDOM      1   /* per-domain lock, idx is domid */
413 #define LOCKPROF_TYPE_N           2   /* number of types */
    /* One profile record per lock. */
414 struct xen_sysctl_lockprof_data {
415     char     name[40];     /* lock name (may include up to 2 %d specifiers) */
416     int32_t  type;         /* LOCKPROF_TYPE_??? */
417     int32_t  idx;          /* index (e.g. domain id) */
418     uint64_aligned_t lock_cnt;     /* # of locking succeeded */
419     uint64_aligned_t block_cnt;    /* # of wait for lock */
420     uint64_aligned_t lock_time;    /* nsecs lock held */
421     uint64_aligned_t block_time;   /* nsecs waited for lock */
422 };
423 typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t;
424 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t);
425 struct xen_sysctl_lockprof_op {
426     /* IN variables. */
427     uint32_t       cmd;               /* XEN_SYSCTL_LOCKPROF_??? */
428     uint32_t       max_elem;          /* size of output buffer */
429     /* OUT variables (query only). */
430     uint32_t       nr_elem;           /* number of elements available */
431     uint64_aligned_t time;            /* nsecs of profile measurement */
432     /* profile information (or NULL) */
433     XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
434 };
435 
436 /* XEN_SYSCTL_cputopoinfo */
437 #define XEN_INVALID_CORE_ID     (~0U)
438 #define XEN_INVALID_SOCKET_ID   (~0U)
439 #define XEN_INVALID_NODE_ID     (~0U)
440 
    /* Topology of one CPU; XEN_INVALID_* marks unknown/unavailable fields. */
441 struct xen_sysctl_cputopo {
442     uint32_t core;
443     uint32_t socket;
444     uint32_t node;
445 };
446 typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
447 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);
448 
449 /*
450  * IN:
451  *  - a NULL 'cputopo' handle is a request for maximum 'num_cpus'.
452  *  - otherwise it's the number of entries in 'cputopo'
453  *
454  * OUT:
455  *  - If 'num_cpus' is less than the number Xen wants to write but the
456  *    handle is not a NULL one, partial data gets returned and 'num_cpus' gets
457  *    updated to reflect the intended number.
458  *  - Otherwise, 'num_cpus' shall indicate the number of entries written, which
459  *    may be less than the input value.
460  */
461 struct xen_sysctl_cputopoinfo {
462     uint32_t num_cpus;   /* IN/OUT: see the semantics described above */
463     XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
464 };
465 
466 /* XEN_SYSCTL_numainfo */
467 #define XEN_INVALID_MEM_SZ     (~0U)
468 #define XEN_INVALID_NODE_DIST  (~0U)
469 
    /* Per-node memory figures; XEN_INVALID_MEM_SZ marks unavailable data. */
470 struct xen_sysctl_meminfo {
471     uint64_t memsize;
472     uint64_t memfree;
473 };
474 typedef struct xen_sysctl_meminfo xen_sysctl_meminfo_t;
475 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_meminfo_t);
476 
477 /*
478  * IN:
479  *  - Both 'meminfo' and 'distance' handles being null is a request
480  *    for maximum value of 'num_nodes'.
481  *  - Otherwise it's the number of entries in 'meminfo' and square root
482  *    of number of entries in 'distance' (when corresponding handle is
483  *    non-null)
484  *
485  * OUT:
486  *  - If 'num_nodes' is less than the number Xen wants to write but either
487  *    handle is not a NULL one, partial data gets returned and 'num_nodes'
488  *    gets updated to reflect the intended number.
489  *  - Otherwise, 'num_nodes' shall indicate the number of entries written, which
490  *    may be less than the input value.
491  */
492 
493 struct xen_sysctl_numainfo {
494     uint32_t num_nodes;
495 
496     XEN_GUEST_HANDLE_64(xen_sysctl_meminfo_t) meminfo;
497 
498     /*
499      * Distance between nodes 'i' and 'j' is stored in index 'i*N + j',
500      * where N is the number of nodes that will be returned in 'num_nodes'
501      * (i.e. not 'num_nodes' provided by the caller)
502      */
503     XEN_GUEST_HANDLE_64(uint32) distance;
504 };
505 
506 /* XEN_SYSCTL_cpupool_op */
    /* The single-letter codes below tag which ops use each field (see struct). */
507 #define XEN_SYSCTL_CPUPOOL_OP_CREATE                1  /* C */
508 #define XEN_SYSCTL_CPUPOOL_OP_DESTROY               2  /* D */
509 #define XEN_SYSCTL_CPUPOOL_OP_INFO                  3  /* I */
510 #define XEN_SYSCTL_CPUPOOL_OP_ADDCPU                4  /* A */
511 #define XEN_SYSCTL_CPUPOOL_OP_RMCPU                 5  /* R */
512 #define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN            6  /* M */
513 #define XEN_SYSCTL_CPUPOOL_OP_FREEINFO              7  /* F */
514 #define XEN_SYSCTL_CPUPOOL_PAR_ANY     0xFFFFFFFF
515 struct xen_sysctl_cpupool_op {
516     uint32_t op;          /* IN */
517     uint32_t cpupool_id;  /* IN: CDIARM OUT: CI */
518     uint32_t sched_id;    /* IN: C      OUT: I  */
519     uint32_t domid;       /* IN: M              */
520     uint32_t cpu;         /* IN: AR             */
521     uint32_t n_dom;       /*            OUT: I  */
522     struct xenctl_bitmap cpumap; /*     OUT: IF */
523 };
524 
525 /*
526  * Error return values of cpupool operations:
527  *
528  * -EADDRINUSE:
529  *  XEN_SYSCTL_CPUPOOL_OP_RMCPU: A vcpu is temporarily pinned to the cpu
530  *    which is to be removed from a cpupool.
531  * -EADDRNOTAVAIL:
532  *  XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: A previous
533  *    request to remove a cpu from a cpupool was terminated with -EAGAIN
534  *    and has not been retried using the same parameters.
535  * -EAGAIN:
536  *  XEN_SYSCTL_CPUPOOL_OP_RMCPU: The cpu can't be removed from the cpupool
537  *    as it is active in the hypervisor. A retry will succeed soon.
538  * -EBUSY:
539  *  XEN_SYSCTL_CPUPOOL_OP_DESTROY, XEN_SYSCTL_CPUPOOL_OP_RMCPU: A cpupool
540  *    can't be destroyed or the last cpu can't be removed as there is still
541  *    a running domain in that cpupool.
542  * -EEXIST:
543  *  XEN_SYSCTL_CPUPOOL_OP_CREATE: A cpupool_id was specified and is already
544  *    existing.
545  * -EINVAL:
546  *  XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: An illegal
547  *    cpu was specified (cpu does not exist).
548  *  XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN: An illegal domain was specified
549  *    (domain id illegal or not suitable for operation).
550  * -ENODEV:
551  *  XEN_SYSCTL_CPUPOOL_OP_ADDCPU, XEN_SYSCTL_CPUPOOL_OP_RMCPU: The specified
552  *    cpu is either not free (add) or not member of the specified cpupool
553  *    (remove).
554  * -ENOENT:
555  *  all: The cpupool with the specified cpupool_id doesn't exist.
556  *
557  * Some common error return values like -ENOMEM and -EFAULT are possible for
558  * all the operations.
559  */
560 
561 #define ARINC653_MAX_DOMAINS_PER_SCHEDULE   64
562 /*
563  * This structure is used to pass a new ARINC653 schedule from a
564  * privileged domain (ie dom0) to Xen.
565  */
566 struct xen_sysctl_arinc653_schedule {
567     /* major_frame holds the time for the new schedule's major frame
568      * in nanoseconds. */
569     uint64_aligned_t     major_frame;
570     /* num_sched_entries holds how many of the entries in the
571      * sched_entries[] array are valid. */
572     uint8_t     num_sched_entries;
573     /* The sched_entries array holds the actual schedule entries. */
574     struct {
575         /* dom_handle must match a domain's UUID */
576         xen_domain_handle_t dom_handle;
577         /* If a domain has multiple VCPUs, vcpu_id specifies which one
578          * this schedule entry applies to. It should be set to 0 if
579          * there is only one VCPU for the domain. */
580         unsigned int vcpu_id;
581         /* runtime specifies the amount of time that should be allocated
582          * to this VCPU per major frame. It is specified in nanoseconds */
583         uint64_aligned_t runtime;
584     } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
585 };
586 typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t;
587 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t);
588 
589 /*
590  * Valid range for context switch rate limit (in microseconds).
591  * Applicable to Credit and Credit2 schedulers.
592  */
593 #define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000
594 #define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100
595 
596 struct xen_sysctl_credit_schedule {
597     /* Length of timeslice in milliseconds */
598 #define XEN_SYSCTL_CSCHED_TSLICE_MAX 1000
599 #define XEN_SYSCTL_CSCHED_TSLICE_MIN 1
600     unsigned tslice_ms;
601     unsigned ratelimit_us;
602     /*
603      * How long we consider a vCPU to be cache-hot on the
604      * CPU where it has run (max 100ms, in microseconds)
605      */
606 #define XEN_SYSCTL_CSCHED_MGR_DLY_MAX_US (100 * 1000)
607     unsigned vcpu_migr_delay_us;
608 };
609 
    /* Credit2 exposes only the context-switch rate limit. */
610 struct xen_sysctl_credit2_schedule {
611     unsigned ratelimit_us;
612 };
613 
614 /* XEN_SYSCTL_scheduler_op */
615 /* Set or get info? */
616 #define XEN_SYSCTL_SCHEDOP_putinfo 0
617 #define XEN_SYSCTL_SCHEDOP_getinfo 1
618 struct xen_sysctl_scheduler_op {
619     uint32_t cpupool_id; /* Cpupool whose scheduler is to be targeted. */
620     uint32_t sched_id;   /* XEN_SCHEDULER_* (domctl.h) */
621     uint32_t cmd;        /* XEN_SYSCTL_SCHEDOP_* */
    /* Union arm is selected by @sched_id. */
622     union {
623         struct xen_sysctl_sched_arinc653 {
624             XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule;
625         } sched_arinc653;
626         struct xen_sysctl_credit_schedule sched_credit;
627         struct xen_sysctl_credit2_schedule sched_credit2;
628     } u;
629 };
630 
631 /*
632  * Output format of gcov data:
633  *
634  * XEN_GCOV_FORMAT_MAGIC XEN_GCOV_RECORD ... XEN_GCOV_RECORD
635  *
636  * That is, one magic number followed by 0 or more record.
637  *
638  * The magic number is stored as an uint32_t field.
639  *
640  * The record is packed and variable in length. It has the form:
641  *
642  *  filename: a NULL terminated path name extracted from gcov, used to
643  *            create the name of gcda file.
644  *  size:     a uint32_t field indicating the size of the payload, the
645  *            unit is byte.
646  *  payload:  the actual payload, length is `size' bytes.
647  *
648  * Userspace tool will split the record to different files.
649  */
650 
651 #define XEN_GCOV_FORMAT_MAGIC    0x58434f56 /* XCOV */
652 
653 /*
654  * Output format of LLVM coverage data is just a raw stream, as would be
655  * written by the compiler_rt run time library into a .profraw file. There
656  * are no special Xen tags or delimiters because none are needed.
657  */
658 
659 #define XEN_SYSCTL_COVERAGE_get_size 0 /* Get total size of output data */
660 #define XEN_SYSCTL_COVERAGE_read     1 /* Read output data */
661 #define XEN_SYSCTL_COVERAGE_reset    2 /* Reset all counters */
662 
663 struct xen_sysctl_coverage_op {
664     uint32_t cmd;  /* XEN_SYSCTL_COVERAGE_* */
665     uint32_t size; /* IN/OUT: size of the buffer  */
666     XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
667 };
668 
669 #define XEN_SYSCTL_PSR_CMT_get_total_rmid            0
670 #define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor   1
671 /* The L3 cache size is returned in KB unit */
672 #define XEN_SYSCTL_PSR_CMT_get_l3_cache_size         2
673 #define XEN_SYSCTL_PSR_CMT_enabled                   3
674 #define XEN_SYSCTL_PSR_CMT_get_l3_event_mask         4
675 struct xen_sysctl_psr_cmt_op {
676     uint32_t cmd;       /* IN: XEN_SYSCTL_PSR_CMT_* */
677     uint32_t flags;     /* padding variable, may be extended for future use */
678     union {
679         uint64_t data;  /* OUT */
680         struct {
681             uint32_t cpu;   /* IN */
682             uint32_t rsvd;  /* padding */
683         } l3_cache;
684     } u;
685 };
686 
687 /* XEN_SYSCTL_pcitopoinfo */
688 #define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
689 struct xen_sysctl_pcitopoinfo {
690     /*
691      * IN: Number of elements in 'devs' and 'nodes' arrays.
692      * OUT: Number of processed elements of those arrays.
693      */
694     uint32_t num_devs;
695 
696     /* IN: list of devices for which node IDs are requested. */
697     XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;
698 
699     /*
700      * OUT: node identifier for each device.
701      * If information for a particular device is not available then
702      * corresponding entry will be set to XEN_INVALID_NODE_ID. If
703      * device is not known to the hypervisor then XEN_INVALID_DEV
704      * will be provided.
705      */
706     XEN_GUEST_HANDLE_64(uint32) nodes;
707 };
708 
709 #define XEN_SYSCTL_PSR_get_l3_info               0
710 #define XEN_SYSCTL_PSR_get_l2_info               1
711 #define XEN_SYSCTL_PSR_get_mba_info              2
712 struct xen_sysctl_psr_alloc {
713     uint32_t cmd;       /* IN: XEN_SYSCTL_PSR_* */
714     uint32_t target;    /* IN */
    /* cat_info is used by the L3/L2 queries, mba_info by the MBA query. */
715     union {
716         struct {
717             uint32_t cbm_len;   /* OUT: CBM length */
718             uint32_t cos_max;   /* OUT: Maximum COS */
719 #define XEN_SYSCTL_PSR_CAT_L3_CDP       (1u << 0)
720             uint32_t flags;     /* OUT: CAT flags */
721         } cat_info;
722 
723         struct {
724             uint32_t thrtl_max; /* OUT: Maximum throttle */
725             uint32_t cos_max;   /* OUT: Maximum COS */
726 #define XEN_SYSCTL_PSR_MBA_LINEAR      (1u << 0)
727             uint32_t flags;     /* OUT: MBA flags */
728         } mba_info;
729     } u;
730 };
731 
732 #define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
733 
    /* Sub-commands for xen_sysctl_tmem_op.cmd. */
734 #define XEN_SYSCTL_TMEM_OP_THAW                   0
735 #define XEN_SYSCTL_TMEM_OP_FREEZE                 1
736 #define XEN_SYSCTL_TMEM_OP_FLUSH                  2
737 #define XEN_SYSCTL_TMEM_OP_DESTROY                3
738 #define XEN_SYSCTL_TMEM_OP_LIST                   4
739 #define XEN_SYSCTL_TMEM_OP_GET_CLIENT_INFO        5
740 #define XEN_SYSCTL_TMEM_OP_SET_CLIENT_INFO        6
741 #define XEN_SYSCTL_TMEM_OP_GET_POOLS              7
742 #define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
743 #define XEN_SYSCTL_TMEM_OP_SET_POOLS              9
744 #define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
745 #define XEN_SYSCTL_TMEM_OP_SET_AUTH               11
746 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE     19
747 #define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV      20
748 #define XEN_SYSCTL_TMEM_OP_SAVE_END               21
749 #define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN          30
750 #define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
751 #define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33
752 
753 /*
754  * XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
755  * xen_sysctl_tmem_op with this structure - sometimes with an extra
756  * page tacked on.
757  */
758 struct tmem_handle {
759     uint32_t pool_id;
760     uint32_t index;
761     xen_tmem_oid_t oid;   /* tmem object identifier */
762 };
763 
764 /*
765  * XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT uses the 'client' in
766  * xen_tmem_op with this structure, which is mostly used during migration.
767  */
768 struct xen_tmem_client {
769     uint32_t version;   /* If mismatched we will get XEN_EOPNOTSUPP. */
770     uint32_t maxpools;  /* If greater than what hypervisor supports, will get
771                            XEN_ERANGE. */
772     uint32_t nr_pools;  /* Current amount of pools. Ignored on SET*/
773     union {             /* See TMEM_CLIENT_[COMPRESS,FROZEN] */
774         uint32_t raw;
775         struct {
776             uint8_t frozen:1,
777                     compress:1,
778                     migrating:1;
779         } u;
780     } flags;
781     uint32_t weight;
782 };
783 typedef struct xen_tmem_client xen_tmem_client_t;
784 DEFINE_XEN_GUEST_HANDLE(xen_tmem_client_t);
785 
786 /*
787  * XEN_SYSCTL_TMEM_OP_[GET|SET]_POOLS or XEN_SYSCTL_TMEM_OP_SET_AUTH
788  * uses the 'pool' array in * xen_sysctl_tmem_op with this structure.
789  * The XEN_SYSCTL_TMEM_OP_GET_POOLS hypercall will
790  * return the number of entries in 'pool' or a negative value
791  * if an error was encountered.
792  * The XEN_SYSCTL_TMEM_OP_SET_[AUTH|POOLS] will return the number of
793  * entries in 'pool' processed or a negative value if an error
794  * was encountered.
795  */
796 struct xen_tmem_pool_info {
797     union {
798         uint32_t raw;
799         struct {
800             uint32_t persist:1,    /* See TMEM_POOL_PERSIST. */
801                      shared:1,     /* See TMEM_POOL_SHARED. */
802                      auth:1,       /* See TMEM_POOL_AUTH. */
803                      rsv1:1,
804                      pagebits:8,   /* TMEM_POOL_PAGESIZE_[SHIFT,MASK]. */
805                      rsv2:12,
806                      version:8;    /* TMEM_POOL_VERSION_[SHIFT,MASK]. */
807         } u;
808     } flags;
809     uint32_t id;                  /* Less than tmem_client.maxpools. */
810     uint64_t n_pages;             /* Zero on XEN_SYSCTL_TMEM_OP_SET_[AUTH|POOLS]. */
811     uint64_aligned_t uuid[2];     /* pool UUID, e.g. for matching shared pools */
812 };
813 typedef struct xen_tmem_pool_info xen_tmem_pool_info_t;
814 DEFINE_XEN_GUEST_HANDLE(xen_tmem_pool_info_t);
815 
/* Argument for XEN_SYSCTL_tmem_op: control/query transcendent memory. */
struct xen_sysctl_tmem_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
    int32_t pool_id;    /* IN: 0 by default unless _SAVE_*, RESTORE_* .*/
    uint32_t cli_id;    /* IN: client id, 0 for XEN_SYSCTL_TMEM_QUERY_FREEABLE_MB
                           for all others can be the domain id or
                           XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
    uint32_t len;       /* IN: length of 'buf'. If not applicable to use 0. */
    uint32_t arg;       /* IN: If not applicable to command use 0. */
    uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
    union {             /* Which member is valid depends on 'cmd'. */
        XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save/restore */
        XEN_GUEST_HANDLE_64(xen_tmem_client_t) client; /* IN/OUT for */
                        /*  XEN_SYSCTL_TMEM_OP_[GET,SAVE]_CLIENT. */
        XEN_GUEST_HANDLE_64(xen_tmem_pool_info_t) pool; /* OUT for */
                        /* XEN_SYSCTL_TMEM_OP_GET_POOLS. Must have 'len' */
                        /* of them. */
    } u;
};
835 
836 /*
837  * XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
838  *
839  * Return hardware capabilities concerning masking or faulting of the cpuid
840  * instruction for PV guests.
841  */
struct xen_sysctl_cpu_levelling_caps {
#define XEN_SYSCTL_CPU_LEVELCAP_faulting    (1ul <<  0) /* CPUID faulting    */
#define XEN_SYSCTL_CPU_LEVELCAP_ecx         (1ul <<  1) /* 0x00000001.ecx    */
#define XEN_SYSCTL_CPU_LEVELCAP_edx         (1ul <<  2) /* 0x00000001.edx    */
#define XEN_SYSCTL_CPU_LEVELCAP_extd_ecx    (1ul <<  3) /* 0x80000001.ecx    */
#define XEN_SYSCTL_CPU_LEVELCAP_extd_edx    (1ul <<  4) /* 0x80000001.edx    */
#define XEN_SYSCTL_CPU_LEVELCAP_xsave_eax   (1ul <<  5) /* 0x0000000D:1.eax  */
#define XEN_SYSCTL_CPU_LEVELCAP_thermal_ecx (1ul <<  6) /* 0x00000006.ecx    */
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_eax    (1ul <<  7) /* 0x00000007:0.eax  */
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx    (1ul <<  8) /* 0x00000007:0.ebx  */
    uint32_t caps;      /* OUT: Bitmask of XEN_SYSCTL_CPU_LEVELCAP_* values. */
};
854 
855 /*
856  * XEN_SYSCTL_get_cpu_featureset (x86 specific)
857  *
858  * Return information about featuresets available on this host.
859  *  -  Raw: The real cpuid values.
860  *  - Host: The values Xen is using, (after command line overrides, etc).
861  *  -   PV: Maximum set of features which can be given to a PV guest.
862  *  -  HVM: Maximum set of features which can be given to a HVM guest.
863  */
struct xen_sysctl_cpu_featureset {
#define XEN_SYSCTL_cpu_featureset_raw      0 /* The real cpuid values. */
#define XEN_SYSCTL_cpu_featureset_host     1 /* The values Xen is using. */
#define XEN_SYSCTL_cpu_featureset_pv       2 /* Max features for a PV guest. */
#define XEN_SYSCTL_cpu_featureset_hvm      3 /* Max features for an HVM guest. */
    uint32_t index;       /* IN: Which featureset to query? */
    uint32_t nr_features; /* IN/OUT: Number of entries in/written to
                           * 'features', or the maximum number of features if
                           * the guest handle is NULL.  NB. All featuresets
                           * come from the same numberspace, so have the same
                           * maximum length. */
    XEN_GUEST_HANDLE_64(uint32) features; /* OUT: Feature words for the
                                           * selected featureset. */
};
877 
878 /*
879  * XEN_SYSCTL_LIVEPATCH_op
880  *
881  * Refer to the docs/unstable/misc/livepatch.markdown
882  * for the design details of this hypercall.
883  *
884  * There are four sub-ops:
885  *  XEN_SYSCTL_LIVEPATCH_UPLOAD (0)
886  *  XEN_SYSCTL_LIVEPATCH_GET (1)
887  *  XEN_SYSCTL_LIVEPATCH_LIST (2)
888  *  XEN_SYSCTL_LIVEPATCH_ACTION (3)
889  *
890  * The normal sequence of sub-ops is to:
891  *  1) XEN_SYSCTL_LIVEPATCH_UPLOAD to upload the payload. If errors STOP.
892  *  2) XEN_SYSCTL_LIVEPATCH_GET to check the `->rc`. If -XEN_EAGAIN spin.
893  *     If zero go to next step.
894  *  3) XEN_SYSCTL_LIVEPATCH_ACTION with LIVEPATCH_ACTION_APPLY to apply the patch.
 *  4) XEN_SYSCTL_LIVEPATCH_GET to check the `->rc`. If -XEN_EAGAIN spin.
 *     If zero exit with success.
897  */
898 
#define LIVEPATCH_PAYLOAD_VERSION 1
/*
 * .livepatch.funcs structure layout defined in the `Payload format`
 * section in the Live Patch design document.
 *
 * We guard this with __XEN__ as toolstacks SHOULD not use it.
 */
#ifdef __XEN__
struct livepatch_func {
    const char *name;       /* Name of function to be patched. */
    void *new_addr;         /* New/old addresses and sizes; exact semantics
                             * are defined in the design document above. */
    void *old_addr;
    uint32_t new_size;
    uint32_t old_size;
    uint8_t version;        /* MUST be LIVEPATCH_PAYLOAD_VERSION. */
    uint8_t opaque[31];     /* Reserved; not interpreted by toolstacks. */
};
typedef struct livepatch_func livepatch_func_t;
#endif
918 
/*
 * Structure describing an ELF payload. Uniquely identifies the
 * payload. Should be human readable.
 * Recommended length is up to XEN_LIVEPATCH_NAME_SIZE.
 * Includes the NUL terminator.
 */
#define XEN_LIVEPATCH_NAME_SIZE 128
struct xen_livepatch_name {
    XEN_GUEST_HANDLE_64(char) name;         /* IN: pointer to name. */
    uint16_t size;                          /* IN: size of name. May be up to
                                               XEN_LIVEPATCH_NAME_SIZE. */
    uint16_t pad[3];                        /* IN: MUST be zero. */
};
932 
933 /*
934  * Upload a payload to the hypervisor. The payload is verified
935  * against basic checks and if there are any issues the proper return code
936  * will be returned. The payload is not applied at this time - that is
937  * controlled by XEN_SYSCTL_LIVEPATCH_ACTION.
938  *
 * The return value is zero if the payload was successfully uploaded.
 * Otherwise an EXX return value is provided. Duplicate `name`s are not
 * supported.
942  *
943  * The payload at this point is verified against basic checks.
944  *
945  * The `payload` is the ELF payload as mentioned in the `Payload format`
946  * section in the Live Patch design document.
947  */
#define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
/* Argument for XEN_SYSCTL_LIVEPATCH_UPLOAD (see comment above). */
struct xen_sysctl_livepatch_upload {
    struct xen_livepatch_name name;         /* IN, name of the patch. */
    uint64_t size;                          /* IN, size of the ELF file. */
    XEN_GUEST_HANDLE_64(uint8) payload;     /* IN, the ELF file. */
};
954 
955 /*
 * Retrieve the status of a specific payload.
957  *
958  * Upon completion the `struct xen_livepatch_status` is updated.
959  *
960  * The return value is zero on success and XEN_EXX on failure. This operation
961  * is synchronous and does not require preemption.
962  */
#define XEN_SYSCTL_LIVEPATCH_GET 1

/* Abbreviated per-payload state, reported by _GET and _LIST. */
struct xen_livepatch_status {
#define LIVEPATCH_STATE_CHECKED      1
#define LIVEPATCH_STATE_APPLIED      2
    uint32_t state;                /* OUT: LIVEPATCH_STATE_*. */
    int32_t rc;                    /* OUT: 0 if no error, otherwise -XEN_EXX. */
};
typedef struct xen_livepatch_status xen_livepatch_status_t;
DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);
973 
/* Argument for XEN_SYSCTL_LIVEPATCH_GET: look up one payload by name. */
struct xen_sysctl_livepatch_get {
    struct xen_livepatch_name name;         /* IN, name of the payload. */
    struct xen_livepatch_status status;     /* IN/OUT, state of it. */
};
978 
979 /*
980  * Retrieve an array of abbreviated status and names of payloads that are
981  * loaded in the hypervisor.
982  *
 * If the hypercall returns a positive number, it is the number (up to `nr`)
984  * of the payloads returned, along with `nr` updated with the number of remaining
985  * payloads, `version` updated (it may be the same across hypercalls. If it
986  * varies the data is stale and further calls could fail). The `status`,
987  * `name`, and `len`' are updated at their designed index value (`idx`) with
988  * the returned value of data.
989  *
990  * If the hypercall returns E2BIG the `nr` is too big and should be
 * lowered. The upper limit of `nr` is left to the implementation.
992  *
993  * Note that due to the asynchronous nature of hypercalls the domain might have
994  * added or removed the number of payloads making this information stale. It is
995  * the responsibility of the toolstack to use the `version` field to check
 * between each invocation. If the version differs it should discard the stale
997  * data and start from scratch. It is OK for the toolstack to use the new
998  * `version` field.
999  */
#define XEN_SYSCTL_LIVEPATCH_LIST 2
struct xen_sysctl_livepatch_list {
    uint32_t version;                       /* OUT: Hypervisor stamps value.
                                               If varies between calls, we are
                                               getting stale data. */
    uint32_t idx;                           /* IN: Index into hypervisor list. */
    uint32_t nr;                            /* IN: How many status, name, and len
                                               should fill out. Can be zero to get
                                               amount of payloads and version.
                                               OUT: How many payloads left. */
    uint32_t pad;                           /* IN: Must be zero. */
    XEN_GUEST_HANDLE_64(xen_livepatch_status_t) status;  /* OUT. Must have enough
                                               space allocated for nr of them. */
    XEN_GUEST_HANDLE_64(char) name;         /* OUT: Array of names. Each member
                                               MUST be XEN_LIVEPATCH_NAME_SIZE in
                                               size. Must have nr of them. */
    XEN_GUEST_HANDLE_64(uint32) len;        /* OUT: Array of lengths of names.
                                               Must have nr of them. */
};
1019 
1020 /*
1021  * Perform an operation on the payload structure referenced by the `name` field.
1022  * The operation request is asynchronous and the status should be retrieved
1023  * by using either XEN_SYSCTL_LIVEPATCH_GET or XEN_SYSCTL_LIVEPATCH_LIST hypercall.
1024  */
#define XEN_SYSCTL_LIVEPATCH_ACTION 3
struct xen_sysctl_livepatch_action {
    struct xen_livepatch_name name;         /* IN, name of the patch. */
#define LIVEPATCH_ACTION_UNLOAD       1
#define LIVEPATCH_ACTION_REVERT       2
#define LIVEPATCH_ACTION_APPLY        3
#define LIVEPATCH_ACTION_REPLACE      4
    uint32_t cmd;                           /* IN: LIVEPATCH_ACTION_*. */
    uint32_t timeout;                       /* IN: If zero then uses */
                                            /* hypervisor default. */
                                            /* Or upper bound of time (ns) */
                                            /* for operation to take. */
};
1038 
/* Wrapper carrying any of the XEN_SYSCTL_LIVEPATCH_* sub-operations. */
struct xen_sysctl_livepatch_op {
    uint32_t cmd;                           /* IN: XEN_SYSCTL_LIVEPATCH_*. */
    uint32_t pad;                           /* IN: Always zero. */
    union {                                 /* Selected by 'cmd'. */
        struct xen_sysctl_livepatch_upload upload;
        struct xen_sysctl_livepatch_list list;
        struct xen_sysctl_livepatch_get get;
        struct xen_sysctl_livepatch_action action;
    } u;
};
1049 
1050 /*
1051  * XEN_SYSCTL_set_parameter
1052  *
1053  * Change hypervisor parameters at runtime.
 * The input string is parsed similarly to the boot parameters.
 * Parameters are a single NUL-terminated string of at most `size`
 * characters. Multiple settings can be specified by separating them
 * with blanks.
1058  */
1059 
/* Argument for XEN_SYSCTL_set_parameter (see comment above). */
struct xen_sysctl_set_parameter {
    XEN_GUEST_HANDLE_64(char) params;       /* IN: pointer to parameters. */
    uint16_t size;                          /* IN: size of parameters. */
    uint16_t pad[3];                        /* IN: MUST be zero. */
};
1065 
/*
 * Top-level argument for the sysctl hypercall.  'cmd' selects which
 * member of the union 'u' is interpreted.
 */
struct xen_sysctl {
    uint32_t cmd;               /* IN: XEN_SYSCTL_* sub-operation. */
#define XEN_SYSCTL_readconsole                    1
#define XEN_SYSCTL_tbuf_op                        2
#define XEN_SYSCTL_physinfo                       3
#define XEN_SYSCTL_sched_id                       4
#define XEN_SYSCTL_perfc_op                       5
#define XEN_SYSCTL_getdomaininfolist              6
#define XEN_SYSCTL_debug_keys                     7
#define XEN_SYSCTL_getcpuinfo                     8
#define XEN_SYSCTL_availheap                      9
#define XEN_SYSCTL_get_pmstat                    10
#define XEN_SYSCTL_cpu_hotplug                   11
#define XEN_SYSCTL_pm_op                         12
/* Note: value 13 is intentionally unused. */
#define XEN_SYSCTL_page_offline_op               14
#define XEN_SYSCTL_lockprof_op                   15
#define XEN_SYSCTL_cputopoinfo                   16
#define XEN_SYSCTL_numainfo                      17
#define XEN_SYSCTL_cpupool_op                    18
#define XEN_SYSCTL_scheduler_op                  19
#define XEN_SYSCTL_coverage_op                   20
#define XEN_SYSCTL_psr_cmt_op                    21
#define XEN_SYSCTL_pcitopoinfo                   22
#define XEN_SYSCTL_psr_alloc                     23
#define XEN_SYSCTL_tmem_op                       24
#define XEN_SYSCTL_get_cpu_levelling_caps        25
#define XEN_SYSCTL_get_cpu_featureset            26
#define XEN_SYSCTL_livepatch_op                  27
#define XEN_SYSCTL_set_parameter                 28
    uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
    union {
        struct xen_sysctl_readconsole       readconsole;
        struct xen_sysctl_tbuf_op           tbuf_op;
        struct xen_sysctl_physinfo          physinfo;
        struct xen_sysctl_cputopoinfo       cputopoinfo;
        struct xen_sysctl_pcitopoinfo       pcitopoinfo;
        struct xen_sysctl_numainfo          numainfo;
        struct xen_sysctl_sched_id          sched_id;
        struct xen_sysctl_perfc_op          perfc_op;
        struct xen_sysctl_getdomaininfolist getdomaininfolist;
        struct xen_sysctl_debug_keys        debug_keys;
        struct xen_sysctl_getcpuinfo        getcpuinfo;
        struct xen_sysctl_availheap         availheap;
        struct xen_sysctl_get_pmstat        get_pmstat;
        struct xen_sysctl_cpu_hotplug       cpu_hotplug;
        struct xen_sysctl_pm_op             pm_op;
        struct xen_sysctl_page_offline_op   page_offline;
        struct xen_sysctl_lockprof_op       lockprof_op;
        struct xen_sysctl_cpupool_op        cpupool_op;
        struct xen_sysctl_scheduler_op      scheduler_op;
        struct xen_sysctl_coverage_op       coverage_op;
        struct xen_sysctl_psr_cmt_op        psr_cmt_op;
        struct xen_sysctl_psr_alloc         psr_alloc;
        struct xen_sysctl_tmem_op           tmem_op;
        struct xen_sysctl_cpu_levelling_caps cpu_levelling_caps;
        struct xen_sysctl_cpu_featureset    cpu_featureset;
        struct xen_sysctl_livepatch_op      livepatch;
        struct xen_sysctl_set_parameter     set_parameter;
        uint8_t                             pad[128]; /* Sets the union's minimum size. */
    } u;
};
typedef struct xen_sysctl xen_sysctl_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
1129 
1130 #endif /* __XEN_PUBLIC_SYSCTL_H__ */
1131 
1132 /*
1133  * Local variables:
1134  * mode: C
1135  * c-file-style: "BSD"
1136  * c-basic-offset: 4
1137  * tab-width: 4
1138  * indent-tabs-mode: nil
1139  * End:
1140  */
1141