Diffstat (limited to 'src/include/xen/hvm')
-rw-r--r--  src/include/xen/hvm/hvm_op.h   450
-rw-r--r--  src/include/xen/hvm/params.h   249
2 files changed, 420 insertions, 279 deletions
diff --git a/src/include/xen/hvm/hvm_op.h b/src/include/xen/hvm/hvm_op.h
index 469ad4fbc..5d734c48f 100644
--- a/src/include/xen/hvm/hvm_op.h
+++ b/src/include/xen/hvm/hvm_op.h
@@ -1,21 +1,6 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2007, Keir Fraser
  */
 
 #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
@@ -32,12 +17,33 @@ FILE_LICENCE ( MIT );
 #define HVMOP_get_param 1
 struct xen_hvm_param {
     domid_t domid;    /* IN */
+    uint16_t pad;
     uint32_t index;   /* IN */
     uint64_t value;   /* IN/OUT */
 };
 typedef struct xen_hvm_param xen_hvm_param_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
 
+struct xen_hvm_altp2m_suppress_ve {
+    uint16_t view;
+    uint8_t suppress_ve; /* Boolean type. */
+    uint8_t pad1;
+    uint32_t pad2;
+    uint64_t gfn;
+};
+
+struct xen_hvm_altp2m_suppress_ve_multi {
+    uint16_t view;
+    uint8_t suppress_ve; /* Boolean type. */
+    uint8_t pad1;
+    int32_t first_error; /* Should be set to 0. */
+    uint64_t first_gfn; /* Value may be updated. */
+    uint64_t last_gfn;
+    uint64_t first_error_gfn; /* Gfn of the first error. */
+};
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040900
+
 /* Set the logical level of one of a domain's PCI INTx wires. */
 #define HVMOP_set_pci_intx_level 2
 struct xen_hvm_set_pci_intx_level {
@@ -76,63 +82,38 @@ struct xen_hvm_set_pci_link_route {
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
 /* Flushes all VCPU TLBs: @arg must be NULL. */
 #define HVMOP_flush_tlbs 5
 
+/*
+ * hvmmem_type_t should not be defined when generating the corresponding
+ * compat header. This will ensure that the improperly named HVMMEM_(*)
+ * values are defined only once.
+ */
+#ifndef XEN_GENERATING_COMPAT_HEADERS
+
 typedef enum {
     HVMMEM_ram_rw,             /* Normal read/write guest RAM */
     HVMMEM_ram_ro,             /* Read-only; writes are discarded */
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+    HVMMEM_mmio_write_dm,      /* Read-only; writes go to the device model */
+#else
+    HVMMEM_unused,             /* Placeholder; setting memory to this type
+                                  will fail for code after 4.7.0 */
+#endif
+    HVMMEM_ioreq_server        /* Memory type claimed by an ioreq server; type
+                                  changes to this value are only allowed after
+                                  an ioreq server has claimed its ownership.
+                                  Only pages with HVMMEM_ram_rw are allowed to
+                                  change to this type; conversely, pages with
+                                  this type are only allowed to be changed back
+                                  to HVMMEM_ram_rw. */
 } hvmmem_type_t;
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-/* Track dirty VRAM. */
-#define HVMOP_track_dirty_vram 6
-struct xen_hvm_track_dirty_vram {
-    /* Domain to be tracked. */
-    domid_t domid;
-    /* Number of pages to track. */
-    uint32_t nr;
-    /* First pfn to track. */
-    uint64_aligned_t first_pfn;
-    /* OUT variable. */
-    /* Dirty bitmap buffer. */
-    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
-};
-typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
-
-/* Notify that some pages got modified by the Device Model. */
-#define HVMOP_modified_memory 7
-struct xen_hvm_modified_memory {
-    /* Domain to be updated. */
-    domid_t domid;
-    /* Number of pages. */
-    uint32_t nr;
-    /* First pfn. */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
-
-#define HVMOP_set_mem_type 8
-/* Notify that a region of memory is to be treated in a specific way. */
-struct xen_hvm_set_mem_type {
-    /* Domain to be updated. */
-    domid_t domid;
-    /* Memory type */
-    uint16_t hvmmem_type;
-    /* Number of pages. */
-    uint32_t nr;
-    /* First pfn. */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#endif /* XEN_GENERATING_COMPAT_HEADERS */
 
 /* Hint from PV drivers for pagetable destruction. */
 #define HVMOP_pagetable_dying 9
@@ -171,38 +152,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
 /* Deprecated by XENMEM_access_op_get_access */
 #define HVMOP_get_mem_access 13
 
-#define HVMOP_inject_trap 14
-/* Inject a trap into a VCPU, which will get taken up on the next
- * scheduling of it. Note that the caller should know enough of the
- * state of the CPU before injecting, to know what the effect of
- * injecting the trap will be.
- */
-struct xen_hvm_inject_trap {
-    /* Domain to be queried. */
-    domid_t domid;
-    /* VCPU */
-    uint32_t vcpuid;
-    /* Vector number */
-    uint32_t vector;
-    /* Trap type (HVMOP_TRAP_*) */
-    uint32_t type;
-/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
-# define HVMOP_TRAP_ext_int    0 /* external interrupt */
-# define HVMOP_TRAP_nmi        2 /* nmi */
-# define HVMOP_TRAP_hw_exc     3 /* hardware exception */
-# define HVMOP_TRAP_sw_int     4 /* software interrupt (CD nn) */
-# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
-# define HVMOP_TRAP_sw_exc     6 /* INT3 (CC), INTO (CE) */
-    /* Error code, or ~0u to skip */
-    uint32_t error_code;
-    /* Intruction length */
-    uint32_t insn_len;
-    /* CR2 for page faults */
-    uint64_aligned_t cr2;
-};
-typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
-
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #define HVMOP_get_mem_type 15
@@ -222,154 +171,201 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* MSI injection for emulated devices */
-#define HVMOP_inject_msi 16
-struct xen_hvm_inject_msi {
-    /* Domain to be injected */
-    domid_t domid;
-    /* Data -- lower 32 bits */
-    uint32_t data;
-    /* Address (0xfeexxxxx) */
-    uint64_t addr;
-};
-typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
+/*
+ * Definitions relating to DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
+ */
+#define HVM_IOREQSRV_BUFIOREQ_OFF 0
+#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
 
 /*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- *
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
+ * Use this when read_pointer gets updated atomically and
+ * the pointer pair gets read atomically:
  */
+#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
 
-typedef uint16_t ioservid_t;
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+#if defined(__i386__) || defined(__x86_64__)
 
 /*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
+ * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for event
+ *                                 channel upcalls on the specified <vcpu>. If set,
+ *                                 this vector will be used in preference to the
+ *                                 domain global callback via (see
+ *                                 HVM_PARAM_CALLBACK_IRQ).
  */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
+#define HVMOP_set_evtchn_upcall_vector 23
+struct xen_hvm_evtchn_upcall_vector {
+    uint32_t vcpu;
+    uint8_t vector;
 };
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
+typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
 
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>.
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#define HVMOP_guest_request_vm_event 24
+
+/* HVMOP_altp2m: perform altp2m state operations */
+#define HVMOP_altp2m 25
+
+#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001
+
+struct xen_hvm_altp2m_domain_state {
+    /* IN or OUT variable on/off */
+    uint8_t state;
 };
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
+typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);
+
+struct xen_hvm_altp2m_vcpu_enable_notify {
+    uint32_t vcpu_id;
+    uint32_t pad;
+    /* #VE info area gfn */
+    uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);
 
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
+struct xen_hvm_altp2m_vcpu_disable_notify {
+    uint32_t vcpu_id;
+};
+typedef struct xen_hvm_altp2m_vcpu_disable_notify xen_hvm_altp2m_vcpu_disable_notify_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_disable_notify_t);
+
+struct xen_hvm_altp2m_view {
+    /* IN/OUT variable */
+    uint16_t view;
+    uint16_t hvmmem_default_access; /* xenmem_access_t */
 };
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
+typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);
 
-#define HVMOP_PCI_SBDF(s,b,d,f) \
-    ((((s) & 0xffff) << 16) | \
-     (((b) & 0xff) << 8) | \
-     (((d) & 0x1f) << 3) | \
-     ((f) & 0x07))
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+struct xen_hvm_altp2m_set_mem_access {
+    /* view */
+    uint16_t view;
+    /* Memory type */
+    uint16_t access; /* xenmem_access_t */
+    uint32_t pad;
+    /* gfn */
+    uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
 
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
+struct xen_hvm_altp2m_mem_access {
+    /* view */
+    uint16_t view;
+    /* Memory type */
+    uint16_t access; /* xenmem_access_t */
+    uint32_t pad;
+    /* gfn */
+    uint64_t gfn;
+};
+typedef struct xen_hvm_altp2m_mem_access xen_hvm_altp2m_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_mem_access_t);
+
+struct xen_hvm_altp2m_set_mem_access_multi {
+    /* view */
+    uint16_t view;
+    uint16_t pad;
+    /* Number of pages */
+    uint32_t nr;
+    /*
+     * Used for continuation purposes.
+     * Must be set to zero upon initial invocation.
+     */
+    uint64_t opaque;
+    /* List of pfns to set access for */
+    XEN_GUEST_HANDLE(const_uint64) pfn_list;
+    /* Corresponding list of access settings for pfn_list */
+    XEN_GUEST_HANDLE(const_uint8) access_list;
 };
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
 
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */
+struct xen_hvm_altp2m_change_gfn {
+    /* view */
+    uint16_t view;
+    uint16_t pad1;
+    uint32_t pad2;
+    /* old gfn */
+    uint64_t old_gfn;
+    /* new gfn, INVALID_GFN (~0UL) means revert */
+    uint64_t new_gfn;
 };
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
+typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);
 
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+struct xen_hvm_altp2m_get_vcpu_p2m_idx {
+    uint32_t vcpu_id;
+    uint16_t altp2m_idx;
+};
+
+struct xen_hvm_altp2m_set_visibility {
+    uint16_t altp2m_idx;
+    uint8_t visible;
+    uint8_t pad;
+};
+
+struct xen_hvm_altp2m_op {
+    uint32_t version;   /* HVMOP_ALTP2M_INTERFACE_VERSION */
+    uint32_t cmd;
+/* Get/set the altp2m state for a domain */
+#define HVMOP_altp2m_get_domain_state     1
+#define HVMOP_altp2m_set_domain_state     2
+/* Set a given VCPU to receive altp2m event notifications */
+#define HVMOP_altp2m_vcpu_enable_notify   3
+/* Create a new view */
+#define HVMOP_altp2m_create_p2m           4
+/* Destroy a view */
+#define HVMOP_altp2m_destroy_p2m          5
+/* Switch view for an entire domain */
+#define HVMOP_altp2m_switch_p2m           6
+/* Notify that a page of memory is to have specific access types */
+#define HVMOP_altp2m_set_mem_access       7
+/* Change a p2m entry to have a different gfn->mfn mapping */
+#define HVMOP_altp2m_change_gfn           8
+/* Set access for an array of pages */
+#define HVMOP_altp2m_set_mem_access_multi 9
+/* Set the "Suppress #VE" bit on a page */
+#define HVMOP_altp2m_set_suppress_ve      10
+/* Get the "Suppress #VE" bit of a page */
+#define HVMOP_altp2m_get_suppress_ve      11
+/* Get the access of a page of memory from a certain view */
+#define HVMOP_altp2m_get_mem_access       12
+/* Disable altp2m event notifications for a given VCPU */
+#define HVMOP_altp2m_vcpu_disable_notify  13
+/* Get the active vcpu p2m index */
+#define HVMOP_altp2m_get_p2m_idx          14
+/* Set the "Supress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
+/* Set visibility for a given altp2m view */
+#define HVMOP_altp2m_set_visibility       16
+    domid_t domain;
+    uint16_t pad1;
+    uint32_t pad2;
+    union {
+        struct xen_hvm_altp2m_domain_state domain_state;
+        struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+        struct xen_hvm_altp2m_view view;
+#if __XEN_INTERFACE_VERSION__ < 0x00040a00
+        struct xen_hvm_altp2m_set_mem_access set_mem_access;
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040a00 */
+        struct xen_hvm_altp2m_mem_access mem_access;
+        struct xen_hvm_altp2m_change_gfn change_gfn;
+        struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
+        struct xen_hvm_altp2m_suppress_ve suppress_ve;
+        struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
+        struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
+        struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
+        struct xen_hvm_altp2m_set_visibility set_visibility;
+        uint8_t pad[64];
+    } u;
+};
+typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
 
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
diff --git a/src/include/xen/hvm/params.h b/src/include/xen/hvm/params.h
index 49e06586d..39cb7f7d3 100644
--- a/src/include/xen/hvm/params.h
+++ b/src/include/xen/hvm/params.h
@@ -1,21 +1,6 @@
+/* SPDX-License-Identifier: MIT */
 /*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2007, Keir Fraser
  */
 
 #ifndef __XEN_PUBLIC_HVM_PARAMS_H__
@@ -25,22 +10,67 @@ FILE_LICENCE ( MIT );
 
 #include "hvm_op.h"
 
+/* These parameters are deprecated and their meaning is undefined. */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+#define HVM_PARAM_PAE_ENABLED 4
+#define HVM_PARAM_DM_DOMAIN 13
+#define HVM_PARAM_MEMORY_EVENT_CR0 20
+#define HVM_PARAM_MEMORY_EVENT_CR3 21
+#define HVM_PARAM_MEMORY_EVENT_CR4 22
+#define HVM_PARAM_MEMORY_EVENT_INT3 23
+#define HVM_PARAM_NESTEDHVM 24
+#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
+#define HVM_PARAM_BUFIOREQ_EVTCHN 26
+#define HVM_PARAM_MEMORY_EVENT_MSR 30
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
 /*
  * Parameter space for HVMOP_{set,get}_param.
  */
 
+#define HVM_PARAM_CALLBACK_IRQ 0
+#define HVM_PARAM_CALLBACK_IRQ_TYPE_MASK xen_mk_ullong(0xFF00000000000000)
 /*
  * How should CPU0 event-channel notifications be delivered?
- * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
- * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
- *                  Domain = val[47:32], Bus = val[31:16],
- *                  DevFn = val[15: 8], IntX = val[ 1: 0]
- * val[63:56] == 2: val[7:0] is a vector number, check for
- *                  XENFEAT_hvm_callback_vector to know if this delivery
- *                  method is available.
+ *
+ * If val == 0 then CPU0 event-channel notifications are not delivered.
+ * If val != 0, val[63:56] encodes the type, as follows:
  */
-#define HVM_PARAM_CALLBACK_IRQ 0
+
+#define HVM_PARAM_CALLBACK_TYPE_GSI 0
+/*
+ * val[55:0] is a delivery GSI. GSI 0 cannot be used, as it aliases val == 0,
+ * and disables all notifications.
+ */
+
+#define HVM_PARAM_CALLBACK_TYPE_PCI_INTX 1
+/*
+ * val[55:0] is a delivery PCI INTx line:
+ * Domain = val[47:32], Bus = val[31:16] DevFn = val[15:8], IntX = val[1:0]
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define HVM_PARAM_CALLBACK_TYPE_VECTOR 2
+/*
+ * val[7:0] is a vector number. Check for XENFEAT_hvm_callback_vector to know
+ * if this delivery method is available.
+ */
+#elif defined(__arm__) || defined(__aarch64__)
+#define HVM_PARAM_CALLBACK_TYPE_PPI 2
+/*
+ * val[55:16] needs to be zero.
+ * val[15:8] is interrupt flag of the PPI used by event-channel:
+ *  bit 8: the PPI is edge(1) or level(0) triggered
+ *  bit 9: the PPI is active low(1) or high(0)
+ * val[7:0] is a PPI number used by event-channel.
+ * This is only used by ARM/ARM64 and masking/eoi the interrupt associated to
+ * the notification is handled by the interrupt controller.
+ */
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK 0xFF00
+#define HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL 2
+#endif
 
 /*
  * These are not used by Xen. They are here for convenience of HVM-guest
@@ -49,18 +79,103 @@ FILE_LICENCE ( MIT );
 #define HVM_PARAM_STORE_PFN 1
 #define HVM_PARAM_STORE_EVTCHN 2
 
-#define HVM_PARAM_PAE_ENABLED 4
-
 #define HVM_PARAM_IOREQ_PFN 5
 
 #define HVM_PARAM_BUFIOREQ_PFN 6
-#define HVM_PARAM_BUFIOREQ_EVTCHN 26
 
 #if defined(__i386__) || defined(__x86_64__)
 
-/* Expose Viridian interfaces to this HVM guest? */
+/*
+ * Viridian enlightenments
+ *
+ * (See http://download.microsoft.com/download/A/B/4/AB43A34E-BDD0-4FA6-BDEF-79EEF16E880B/Hypervisor%20Top%20Level%20Functional%20Specification%20v4.0.docx)
+ *
+ * To expose viridian enlightenments to the guest set this parameter
+ * to the desired feature mask. The base feature set must be present
+ * in any valid feature mask.
+ */
 #define HVM_PARAM_VIRIDIAN 9
 
+/* Base+Freq viridian feature sets:
+ *
+ * - Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL)
+ * - APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
+ * - Virtual Processor index MSR (HV_X64_MSR_VP_INDEX)
+ * - Timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ *   HV_X64_MSR_APIC_FREQUENCY)
+ */
+#define _HVMPV_base_freq 0
+#define HVMPV_base_freq (1 << _HVMPV_base_freq)
+
+/* Feature set modifications */
+
+/* Disable timer frequency MSRs (HV_X64_MSR_TSC_FREQUENCY and
+ * HV_X64_MSR_APIC_FREQUENCY).
+ * This modification restores the viridian feature set to the
+ * original 'base' set exposed in releases prior to Xen 4.4.
+ */
+#define _HVMPV_no_freq 1
+#define HVMPV_no_freq (1 << _HVMPV_no_freq)
+
+/* Enable Partition Time Reference Counter (HV_X64_MSR_TIME_REF_COUNT) */
+#define _HVMPV_time_ref_count 2
+#define HVMPV_time_ref_count (1 << _HVMPV_time_ref_count)
+
+/* Enable Reference TSC Page (HV_X64_MSR_REFERENCE_TSC) */
+#define _HVMPV_reference_tsc 3
+#define HVMPV_reference_tsc (1 << _HVMPV_reference_tsc)
+
+/* Use Hypercall for remote TLB flush */
+#define _HVMPV_hcall_remote_tlb_flush 4
+#define HVMPV_hcall_remote_tlb_flush (1 << _HVMPV_hcall_remote_tlb_flush)
+
+/* Use APIC assist */
+#define _HVMPV_apic_assist 5
+#define HVMPV_apic_assist (1 << _HVMPV_apic_assist)
+
+/* Enable crash MSRs */
+#define _HVMPV_crash_ctl 6
+#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
+
+/* Enable SYNIC MSRs */
+#define _HVMPV_synic 7
+#define HVMPV_synic (1 << _HVMPV_synic)
+
+/* Enable STIMER MSRs */
+#define _HVMPV_stimer 8
+#define HVMPV_stimer (1 << _HVMPV_stimer)
+
+/* Use Synthetic Cluster IPI Hypercall */
+#define _HVMPV_hcall_ipi 9
+#define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi)
+
+/* Enable ExProcessorMasks */
+#define _HVMPV_ex_processor_masks 10
+#define HVMPV_ex_processor_masks (1 << _HVMPV_ex_processor_masks)
+
+/* Allow more than 64 VPs */
+#define _HVMPV_no_vp_limit 11
+#define HVMPV_no_vp_limit (1 << _HVMPV_no_vp_limit)
+
+/* Enable vCPU hotplug */
+#define _HVMPV_cpu_hotplug 12
+#define HVMPV_cpu_hotplug (1 << _HVMPV_cpu_hotplug)
+
+#define HVMPV_feature_mask \
+        (HVMPV_base_freq | \
+         HVMPV_no_freq | \
+         HVMPV_time_ref_count | \
+         HVMPV_reference_tsc | \
+         HVMPV_hcall_remote_tlb_flush | \
+         HVMPV_apic_assist | \
+         HVMPV_crash_ctl | \
+         HVMPV_synic | \
+         HVMPV_stimer | \
+         HVMPV_hcall_ipi | \
+         HVMPV_ex_processor_masks | \
+         HVMPV_no_vp_limit | \
+         HVMPV_cpu_hotplug)
+
 #endif
 
 /*
@@ -94,9 +209,6 @@ FILE_LICENCE ( MIT );
 /* Identity-map page directory used by Intel EPT when CR0.PG=0. */
 #define HVM_PARAM_IDENT_PT 12
 
-/* Device Model domain, defaults to 0. */
-#define HVM_PARAM_DM_DOMAIN 13
-
 /* ACPI S state: currently support S0 and S3 on x86. */
 #define HVM_PARAM_ACPI_S_STATE 14
 
@@ -121,27 +233,9 @@ FILE_LICENCE ( MIT );
  */
 #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19
 
-/* Enable blocking memory events, async or sync (pause vcpu until response)
- * onchangeonly indicates messages only on a change of value */
-#define HVM_PARAM_MEMORY_EVENT_CR0 20
-#define HVM_PARAM_MEMORY_EVENT_CR3 21
-#define HVM_PARAM_MEMORY_EVENT_CR4 22
-#define HVM_PARAM_MEMORY_EVENT_INT3 23
-#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25
-#define HVM_PARAM_MEMORY_EVENT_MSR 30
-
-#define HVMPME_MODE_MASK (3 << 0)
-#define HVMPME_mode_disabled 0
-#define HVMPME_mode_async 1
-#define HVMPME_mode_sync 2
-#define HVMPME_onchangeonly (1 << 2)
-
-/* Boolean: Enable nestedhvm (hvm only) */
-#define HVM_PARAM_NESTEDHVM 24
-
 /* Params for the mem event rings */
 #define HVM_PARAM_PAGING_RING_PFN 27
-#define HVM_PARAM_ACCESS_RING_PFN 28
+#define HVM_PARAM_MONITOR_RING_PFN 28
 #define HVM_PARAM_SHARING_RING_PFN 29
 
 /* SHUTDOWN_* action in case of a triple fault */
@@ -153,6 +247,57 @@ FILE_LICENCE ( MIT );
 /* Location of the VM Generation ID in guest physical address space. */
 #define HVM_PARAM_VM_GENERATION_ID_ADDR 34
 
-#define HVM_NR_PARAMS 35
+/*
+ * Set mode for altp2m:
+ *  disabled: don't activate altp2m (default)
+ *  mixed: allow access to all altp2m ops for both in-guest and external tools
+ *  external: allow access to external privileged tools only
+ *  limited: guest only has limited access (ie. control VMFUNC and #VE)
+ *
+ * Note that 'mixed' mode has not been evaluated for safety from a
+ * security perspective. Before using this mode in a
+ * security-critical environment, each subop should be evaluated for
+ * safety, with unsafe subops blacklisted in XSM.
+ */
+#define HVM_PARAM_ALTP2M 35
+#define XEN_ALTP2M_disabled 0
+#define XEN_ALTP2M_mixed 1
+#define XEN_ALTP2M_external 2
+#define XEN_ALTP2M_limited 3
+
+/*
+ * Size of the x87 FPU FIP/FDP registers that the hypervisor needs to
+ * save/restore. This is a workaround for a hardware limitation that
+ * does not allow the full FIP/FDP and FCS/FDS to be restored.
+ *
+ * Valid values are:
+ *
+ * 8: save/restore 64-bit FIP/FDP and clear FCS/FDS (default if CPU
+ *    has FPCSDS feature).
+ *
+ * 4: save/restore 32-bit FIP/FDP, FCS/FDS, and clear upper 32-bits of
+ *    FIP/FDP.
+ *
+ * 0: allow hypervisor to choose based on the value of FIP/FDP
+ *    (default if CPU does not have FPCSDS).
+ *
+ * If FPCSDS (bit 13 in CPUID leaf 0x7, subleaf 0x0) is set, the CPU
+ * never saves FCS/FDS and this parameter should be left at the
+ * default of 8.
+ */
+#define HVM_PARAM_X87_FIP_WIDTH 36
+
+/*
+ * TSS (and its size) used on Intel when CR0.PE=0. The address occupies
+ * the low 32 bits, while the size is in the high 32 ones.
+ */
+#define HVM_PARAM_VM86_TSS_SIZED 37
+
+/* Enable MCA capabilities. */
+#define HVM_PARAM_MCA_CAP 38
+#define XEN_HVM_MCA_CAP_LMCE (xen_mk_ullong(1) << 0)
+#define XEN_HVM_MCA_CAP_MASK XEN_HVM_MCA_CAP_LMCE
+
+#define HVM_NR_PARAMS 39
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
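
Usage sketch (illustrative only, not part of either imported header): a guest drives these definitions through HVMOP_set_param / HVMOP_get_param, for example to program HVM_PARAM_CALLBACK_IRQ with the HVM_PARAM_CALLBACK_TYPE_VECTOR encoding documented in params.h and to read back HVM_PARAM_STORE_EVTCHN. The hvm_op() wrapper below is hypothetical — substitute whatever __HYPERVISOR_hvm_op hypercall mechanism the guest actually provides — and an x86 HVM guest with XENFEAT_hvm_callback_vector available is assumed; include paths may need adjusting for your tree.

/*
 * Illustrative sketch; hvm_op() is a hypothetical wrapper around the
 * __HYPERVISOR_hvm_op hypercall and is not defined by these headers.
 */
#include <stdint.h>
#include "xen/hvm/hvm_op.h"
#include "xen/hvm/params.h"

extern int hvm_op ( unsigned int cmd, void *arg ); /* hypothetical wrapper */

/* Deliver CPU0 event-channel upcalls via a local APIC vector (x86 only). */
static int hvm_set_callback_vector ( uint8_t vector ) {
	struct xen_hvm_param param = {
		.domid = DOMID_SELF,
		.index = HVM_PARAM_CALLBACK_IRQ,
		/* val[63:56] = delivery type, val[7:0] = vector number */
		.value = ( ( ( ( uint64_t ) HVM_PARAM_CALLBACK_TYPE_VECTOR ) << 56 ) |
			   vector ),
	};
	/* The new uint16_t pad field is zeroed by the designated initialiser. */
	return hvm_op ( HVMOP_set_param, &param );
}

/* Read the xenstore event channel number for this domain. */
static int hvm_get_store_evtchn ( uint64_t *evtchn ) {
	struct xen_hvm_param param = {
		.domid = DOMID_SELF,
		.index = HVM_PARAM_STORE_EVTCHN,
	};
	int rc;

	rc = hvm_op ( HVMOP_get_param, &param );
	if ( rc == 0 )
		*evtchn = param.value; /* "value" is the IN/OUT field */
	return rc;
}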