/*
 * Copyright (c) 2016, Citrix Systems Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
#define __XEN_PUBLIC_HVM_DM_OP_H__

#include "../xen.h"

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#include "../event_channel.h"

#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * IOREQ Servers
 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
 * parameter...
 *
 * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
 * ioreq structures), or...
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
 * ioreq ring), or...
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
 * to request buffered I/O emulation).
 *
 * The following hypercalls facilitate the creation of IOREQ Servers for
 * 'secondary' emulators, which are invoked to implement the port I/O, memory
 * and PCI config space ranges that they explicitly register.
 */

typedef uint16_t ioservid_t;

/*
 * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
 *                               secondary emulator.
 *
 * The <id> handed back is unique for the target domain. The value of
 * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
 * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
 * ioreq ring will not be allocated and hence all emulation requests to
 * this server will be synchronous.
 */
#define XEN_DMOP_create_ioreq_server 1

struct xen_dm_op_create_ioreq_server {
    /* IN - should server handle buffered ioreqs */
    uint8_t handle_bufioreq;
    uint8_t pad[3];
    /* OUT - server id */
    ioservid_t id;
};
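
/*
 * Example (illustrative sketch, not a normative part of this interface):
 * creating an IOREQ Server with a buffered ioreq ring, issued via the
 * HYPERVISOR_dm_op() call documented at the bottom of this header.
 * HVM_IOREQSRV_BUFIOREQ_ATOMIC comes from hvm_op.h; domid, id and rc are
 * assumed to be in scope.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_create_ioreq_server,
 *         .u.create_ioreq_server = {
 *             .handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_ATOMIC,
 *         },
 *     };
 *     struct xen_dm_op_buf buf = { .size = sizeof(op) };
 *
 *     set_xen_guest_handle(buf.h, &op);
 *     rc = HYPERVISOR_dm_op(domid, 1, &buf);
 *     if ( rc == 0 )
 *         id = op.u.create_ioreq_server.id;
 */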

/*
 * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
 *                                 access IOREQ Server <id>.
 *
 * If the IOREQ Server is handling buffered emulation requests, the
 * emulator needs to bind to event channel <bufioreq_port> to listen for
 * them. (The event channels used for synchronous emulation requests are
 * specified in the per-CPU ioreq structures.)
 * In addition, if the XENMEM_acquire_resource memory op cannot be used,
 * the emulator will need to map the synchronous ioreq structures and
 * buffered ioreq ring (if it exists) from guest memory. If <flags> does
 * not contain XEN_DMOP_no_gfns then these pages will be made available and
 * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
 * respectively. (If the IOREQ Server is not handling buffered emulation,
 * only <ioreq_gfn> will be valid.)
 *
 * NOTE: To access the synchronous ioreq structures and buffered ioreq
 *       ring, it is preferable to use the XENMEM_acquire_resource memory
 *       op specifying resource type XENMEM_resource_ioreq_server.
 */
#define XEN_DMOP_get_ioreq_server_info 2

struct xen_dm_op_get_ioreq_server_info {
    /* IN - server id */
    ioservid_t id;
    /* IN - flags */
    uint16_t flags;

#define _XEN_DMOP_no_gfns 0
#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)

    /* OUT - buffered ioreq port */
    evtchn_port_t bufioreq_port;
    /* OUT - sync ioreq gfn (see block comment above) */
    uint64_aligned_t ioreq_gfn;
    /* OUT - buffered ioreq gfn (see block comment above) */
    uint64_aligned_t bufioreq_gfn;
};
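
/*
 * Example (illustrative sketch): retrieving the buffered ioreq event
 * channel for an existing server while declining the legacy gfns, on the
 * assumption that the emulator will use XENMEM_acquire_resource instead.
 * domid, id and bufioreq_port are assumed to be in scope.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_get_ioreq_server_info,
 *         .u.get_ioreq_server_info = {
 *             .id = id,
 *             .flags = XEN_DMOP_no_gfns,
 *         },
 *     };
 *     struct xen_dm_op_buf buf = { .size = sizeof(op) };
 *
 *     set_xen_guest_handle(buf.h, &op);
 *     if ( HYPERVISOR_dm_op(domid, 1, &buf) == 0 )
 *         bufioreq_port = op.u.get_ioreq_server_info.bufioreq_port;
 */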

/*
 * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
 *                                        emulation by the client of
 *                                        IOREQ Server <id>.
 * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
 *                                            previously registered for
 *                                            emulation by the client of
 *                                            IOREQ Server <id>.
 *
 * There are three types of I/O that can be emulated: port I/O, memory
 * accesses and PCI config space accesses. The <type> field denotes which
 * type of range the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function
 * values, which should be encoded using the XEN_DMOP_PCI_SBDF helper macro
 * below.
 *
 * NOTE: Unless an emulation request falls entirely within a range mapped
 * by a secondary emulator, it will not be passed to that emulator.
 */
#define XEN_DMOP_map_io_range_to_ioreq_server 3
#define XEN_DMOP_unmap_io_range_from_ioreq_server 4

struct xen_dm_op_ioreq_server_range {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;
    /* IN - type of range */
    uint32_t type;
# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
    /* IN - inclusive start and end of range */
    uint64_aligned_t start, end;
};

#define XEN_DMOP_PCI_SBDF(s, b, d, f) \
    ((((s) & 0xffff) << 16) |         \
     (((b) & 0xff) << 8) |            \
     (((d) & 0x1f) << 3) |            \
     ((f) & 0x07))
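
/*
 * Example (illustrative sketch): claiming the config space of device
 * 0000:00:04.0 for an IOREQ Server. A single PCI function is a
 * one-element range, so <start> and <end> carry the same encoding; the
 * op is then issued as a single-buffer dm_op call as in the examples
 * above. id is assumed to be in scope.
 *
 *     uint64_t sbdf = XEN_DMOP_PCI_SBDF(0, 0, 4, 0);
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_map_io_range_to_ioreq_server,
 *         .u.map_io_range_to_ioreq_server = {
 *             .id = id,
 *             .type = XEN_DMOP_IO_RANGE_PCI,
 *             .start = sbdf,
 *             .end = sbdf,
 *         },
 *     };
 */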

/*
 * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>.
 *
 * The IOREQ Server will not be passed any emulation requests until it is
 * in the enabled state.
 * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
 * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
 * is in the enabled state.
 */
#define XEN_DMOP_set_ioreq_server_state 5

struct xen_dm_op_set_ioreq_server_state {
    /* IN - server id */
    ioservid_t id;
    /* IN - enabled? */
    uint8_t enabled;
    uint8_t pad;
};

/*
 * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define XEN_DMOP_destroy_ioreq_server 6

struct xen_dm_op_destroy_ioreq_server {
    /* IN - server id */
    ioservid_t id;
    uint16_t pad;
};

/*
 * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
 *                            range.
 *
 * NOTE: The bitmap passed back to the caller is passed in a
 *       secondary buffer.
 */
#define XEN_DMOP_track_dirty_vram 7

struct xen_dm_op_track_dirty_vram {
    /* IN - number of pages to be tracked */
    uint32_t nr;
    uint32_t pad;
    /* IN - first pfn to track */
    uint64_aligned_t first_pfn;
};
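
/*
 * Example (illustrative sketch): tracking 16MiB of VRAM (4096 4k pages).
 * The dirty bitmap, one bit per page, comes back via a secondary buffer
 * as noted above. domid, first_pfn and rc are assumed to be in scope.
 *
 *     uint8_t bitmap[4096 / 8];
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_track_dirty_vram,
 *         .u.track_dirty_vram = { .nr = 4096, .first_pfn = first_pfn },
 *     };
 *     struct xen_dm_op_buf bufs[2] = {
 *         { .size = sizeof(op) },
 *         { .size = sizeof(bitmap) },
 *     };
 *
 *     set_xen_guest_handle(bufs[0].h, &op);
 *     set_xen_guest_handle(bufs[1].h, bitmap);
 *     rc = HYPERVISOR_dm_op(domid, 2, bufs);
 */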

/*
 * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
 *                              PCI INTx pins.
 */
#define XEN_DMOP_set_pci_intx_level 8

struct xen_dm_op_set_pci_intx_level {
    /* IN - PCI INTx identification (domain:bus:device:intx) */
    uint16_t domain;
    uint8_t bus, device, intx;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t  level;
};

/*
 * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
 *                             ISA IRQ lines.
 */
#define XEN_DMOP_set_isa_irq_level 9

struct xen_dm_op_set_isa_irq_level {
    /* IN - ISA IRQ (0-15) */
    uint8_t  isa_irq;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t  level;
};

/*
 * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
 */
#define XEN_DMOP_set_pci_link_route 10

struct xen_dm_op_set_pci_link_route {
    /* PCI INTx line (0-3) */
    uint8_t  link;
    /* ISA IRQ (1-15) or 0 -> disable link */
    uint8_t  isa_irq;
};
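
/*
 * Example (illustrative sketch): routing PCI link 0 to ISA IRQ 10, then
 * asserting INTA# (intx 0) of device 00:04.0. Each struct is issued as
 * its own single-buffer dm_op call, as in the earlier examples; error
 * handling is omitted.
 *
 *     struct xen_dm_op route = {
 *         .op = XEN_DMOP_set_pci_link_route,
 *         .u.set_pci_link_route = { .link = 0, .isa_irq = 10 },
 *     };
 *     struct xen_dm_op assert_intx = {
 *         .op = XEN_DMOP_set_pci_intx_level,
 *         .u.set_pci_intx_level = {
 *             .domain = 0, .bus = 0, .device = 4, .intx = 0,
 *             .level = 1,
 *         },
 *     };
 */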

/*
 * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
 *                           an emulator.
 *
 * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
 * @nr_extents entries.
 *
 * On error, @nr_extents will contain the index+1 of the extent that
 * had the error. In this event, it is not defined if or which pages
 * may have been marked as dirty.
 */
#define XEN_DMOP_modified_memory 11

struct xen_dm_op_modified_memory {
    /*
     * IN - number of extents to be processed
     * OUT - returns n+1 for the failing extent
     */
    uint32_t nr_extents;
    /* IN/OUT - must be set to 0 */
    uint32_t opaque;
};

struct xen_dm_op_modified_memory_extent {
    /* IN - number of contiguous pages modified */
    uint32_t nr;
    uint32_t pad;
    /* IN - first pfn modified */
    uint64_aligned_t first_pfn;
};
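
/*
 * Example (illustrative sketch): reporting two runs of dirtied pages in
 * one call. The extents travel in DMOP buf 1, as described above; domid
 * and rc are assumed to be in scope.
 *
 *     struct xen_dm_op_modified_memory_extent extents[2] = {
 *         { .nr = 16, .first_pfn = 0x100 },
 *         { .nr = 1,  .first_pfn = 0x2f0 },
 *     };
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_modified_memory,
 *         .u.modified_memory = { .nr_extents = 2, .opaque = 0 },
 *     };
 *     struct xen_dm_op_buf bufs[2] = {
 *         { .size = sizeof(op) },
 *         { .size = sizeof(extents) },
 *     };
 *
 *     set_xen_guest_handle(bufs[0].h, &op);
 *     set_xen_guest_handle(bufs[1].h, extents);
 *     rc = HYPERVISOR_dm_op(domid, 2, bufs);
 */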

/*
 * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
 *                        in a specific way. (See the definition of
 *                        hvmmem_type_t.)
 *
 * NOTE: In the event of a continuation (return code -ERESTART),
 *       @first_pfn is set to the first pfn of the remaining region and
 *       @nr is reduced to the size of the remaining region.
 */
#define XEN_DMOP_set_mem_type 12

struct xen_dm_op_set_mem_type {
    /* IN - number of contiguous pages */
    uint32_t nr;
    /* IN - new hvmmem_type_t of region */
    uint16_t mem_type;
    uint16_t pad;
    /* IN - first pfn in region */
    uint64_aligned_t first_pfn;
};
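
/*
 * Example (illustrative sketch): marking a page range as
 * HVMMEM_ioreq_server (a hvmmem_type_t value from hvm_op.h) so that
 * accesses to it can later be steered to an ioreq server with
 * XEN_DMOP_map_mem_type_to_ioreq_server below. nr_pages and first_pfn
 * are assumed to be in scope.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_set_mem_type,
 *         .u.set_mem_type = {
 *             .nr = nr_pages,
 *             .mem_type = HVMMEM_ioreq_server,
 *             .first_pfn = first_pfn,
 *         },
 *     };
 */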

/*
 * XEN_DMOP_inject_event: Inject an event into a vCPU, which will be
 *                        taken up when the vCPU is next scheduled.
 *
 * Note that the caller should know enough of the state of the CPU before
 * injecting, to know what the effect of injecting the event will be.
 */
#define XEN_DMOP_inject_event 13

struct xen_dm_op_inject_event {
    /* IN - index of vCPU */
    uint32_t vcpuid;
    /* IN - interrupt vector */
    uint8_t vector;
    /* IN - event type (XEN_DMOP_EVENT_*) */
    uint8_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define XEN_DMOP_EVENT_ext_int    0 /* external interrupt */
# define XEN_DMOP_EVENT_nmi        2 /* nmi */
# define XEN_DMOP_EVENT_hw_exc     3 /* hardware exception */
# define XEN_DMOP_EVENT_sw_int     4 /* software interrupt (CD nn) */
# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
# define XEN_DMOP_EVENT_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* IN - instruction length */
    uint8_t insn_len;
    uint8_t pad0;
    /* IN - error code (or ~0 to skip) */
    uint32_t error_code;
    uint32_t pad1;
    /* IN - CR2 for page faults */
    uint64_aligned_t cr2;
};
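
/*
 * Example (illustrative sketch): injecting a page fault (vector 14, a
 * hardware exception) with an error code and faulting address into
 * vCPU 0. insn_len is left at zero since it is only meaningful for the
 * software interrupt/exception types; ec and fault_addr are assumed to
 * be in scope.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_inject_event,
 *         .u.inject_event = {
 *             .vcpuid = 0,
 *             .vector = 14,
 *             .type = XEN_DMOP_EVENT_hw_exc,
 *             .error_code = ec,
 *             .cr2 = fault_addr,
 *         },
 *     };
 */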

/*
 * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
 */
#define XEN_DMOP_inject_msi 14

struct xen_dm_op_inject_msi {
    /* IN - MSI data (lower 32 bits) */
    uint32_t data;
    uint32_t pad;
    /* IN - MSI address (0xfeexxxxx) */
    uint64_aligned_t addr;
};
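
/*
 * Example (illustrative sketch): injecting an MSI using the conventional
 * x86 address/data encoding. The address targets APIC ID 0 in the
 * 0xfeexxxxx window; the data selects vector 0x41 with fixed delivery,
 * edge triggered.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_inject_msi,
 *         .u.inject_msi = {
 *             .data = 0x0041,
 *             .addr = 0xfee00000,
 *         },
 *     };
 */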

/*
 * XEN_DMOP_map_mem_type_to_ioreq_server: Map or unmap the IOREQ Server <id>
 *                                        to/from a specific memory type
 *                                        <type> for specific accesses
 *                                        <flags>.
 *
 * For now, flags only accept the value of XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
 * which means only write operations are to be forwarded to the ioreq server.
 * Support for the emulation of read operations can be added when an ioreq
 * server has such a requirement in the future.
 */
#define XEN_DMOP_map_mem_type_to_ioreq_server 15

struct xen_dm_op_map_mem_type_to_ioreq_server {
    ioservid_t id;      /* IN - ioreq server id */
    uint16_t type;      /* IN - memory type */
    uint32_t flags;     /* IN - types of accesses to be forwarded to the
                           ioreq server; a value of 0 unmaps the
                           ioreq server */

#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)

    uint64_t opaque;    /* IN/OUT - only used for hypercall continuation,
                           has to be set to zero by the caller */
};
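
/*
 * Example (illustrative sketch): forwarding writes to HVMMEM_ioreq_server
 * pages to an existing server. The pages must first have been given that
 * type with XEN_DMOP_set_mem_type, as shown earlier; id is assumed to be
 * in scope.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_map_mem_type_to_ioreq_server,
 *         .u.map_mem_type_to_ioreq_server = {
 *             .id = id,
 *             .type = HVMMEM_ioreq_server,
 *             .flags = XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
 *             .opaque = 0,
 *         },
 *     };
 */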

/*
 * XEN_DMOP_remote_shutdown: Declare a shutdown for another domain.
 *                           Identical to SCHEDOP_remote_shutdown.
 */
#define XEN_DMOP_remote_shutdown 16

struct xen_dm_op_remote_shutdown {
    uint32_t reason;       /* SHUTDOWN_* => enum sched_shutdown_reason */
                           /* (Other reason values are not blocked) */
};

/*
 * XEN_DMOP_relocate_memory: Relocate GFNs for the specified guest.
 *                           Identical to XENMEM_add_to_physmap with
 *                           space == XENMAPSPACE_gmfn_range.
 */
#define XEN_DMOP_relocate_memory 17

struct xen_dm_op_relocate_memory {
    /* All fields are IN/OUT, with their OUT state undefined. */
    /* Number of GFNs to process. */
    uint32_t size;
    uint32_t pad;
    /* Starting GFN to relocate. */
    uint64_aligned_t src_gfn;
    /* Starting GFN where GFNs should be relocated. */
    uint64_aligned_t dst_gfn;
};

/*
 * XEN_DMOP_pin_memory_cacheattr: Pin caching type of RAM space.
 *                                Identical to XEN_DOMCTL_pin_mem_cacheattr.
 */
#define XEN_DMOP_pin_memory_cacheattr 18

struct xen_dm_op_pin_memory_cacheattr {
    uint64_aligned_t start; /* Start gfn. */
    uint64_aligned_t end;   /* End gfn. */
/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
#define XEN_DMOP_MEM_CACHEATTR_UC  0
#define XEN_DMOP_MEM_CACHEATTR_WC  1
#define XEN_DMOP_MEM_CACHEATTR_WT  4
#define XEN_DMOP_MEM_CACHEATTR_WP  5
#define XEN_DMOP_MEM_CACHEATTR_WB  6
#define XEN_DMOP_MEM_CACHEATTR_UCM 7
#define XEN_DMOP_DELETE_MEM_CACHEATTR (~(uint32_t)0)
    uint32_t type;          /* XEN_DMOP_MEM_CACHEATTR_* */
    uint32_t pad;
};
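
/*
 * Example (illustrative sketch): pinning an emulated framebuffer's gfn
 * range to write-combining, as a device model might for VRAM. This
 * assumes <end> is the last gfn of the range, mirroring
 * XEN_DOMCTL_pin_mem_cacheattr; vram_gfn and vram_pages are assumed to
 * be in scope.
 *
 *     struct xen_dm_op op = {
 *         .op = XEN_DMOP_pin_memory_cacheattr,
 *         .u.pin_memory_cacheattr = {
 *             .start = vram_gfn,
 *             .end = vram_gfn + vram_pages - 1,
 *             .type = XEN_DMOP_MEM_CACHEATTR_WC,
 *         },
 *     };
 */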

struct xen_dm_op {
    uint32_t op;
    uint32_t pad;
    union {
        struct xen_dm_op_create_ioreq_server create_ioreq_server;
        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
        struct xen_dm_op_track_dirty_vram track_dirty_vram;
        struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
        struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
        struct xen_dm_op_set_pci_link_route set_pci_link_route;
        struct xen_dm_op_modified_memory modified_memory;
        struct xen_dm_op_set_mem_type set_mem_type;
        struct xen_dm_op_inject_event inject_event;
        struct xen_dm_op_inject_msi inject_msi;
        struct xen_dm_op_map_mem_type_to_ioreq_server
                map_mem_type_to_ioreq_server;
        struct xen_dm_op_remote_shutdown remote_shutdown;
        struct xen_dm_op_relocate_memory relocate_memory;
        struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr;
    } u;
};

#endif /* __XEN__ || __XEN_TOOLS__ */

struct xen_dm_op_buf {
    XEN_GUEST_HANDLE(void) h;
    xen_ulong_t size;
};
typedef struct xen_dm_op_buf xen_dm_op_buf_t;
DEFINE_XEN_GUEST_HANDLE(xen_dm_op_buf_t);

/* ` enum neg_errnoval
 * ` HYPERVISOR_dm_op(domid_t domid,
 * `                  unsigned int nr_bufs,
 * `                  xen_dm_op_buf_t bufs[])
 * `
 *
 * @domid is the domain the hypercall operates on.
 * @nr_bufs is the number of buffers in the @bufs array.
 * @bufs points to an array of buffers where @bufs[0] contains a struct
 * xen_dm_op, describing the specific device model operation and its
 * parameters.
 * @bufs[1..] may be referenced in the parameters for the purposes of
 * passing extra information to or from the domain.
 */
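
/*
 * Example (illustrative sketch): the general calling pattern. Every
 * operation above follows the same shape; only the union member and any
 * secondary buffers differ. A HYPERVISOR_dm_op() wrapper (e.g. via the
 * privcmd interface, or the toolstack's libxendevicemodel) is assumed.
 *
 *     static int dm_op_single(domid_t domid, struct xen_dm_op *op)
 *     {
 *         struct xen_dm_op_buf buf = { .size = sizeof(*op) };
 *
 *         set_xen_guest_handle(buf.h, op);
 *         return HYPERVISOR_dm_op(domid, 1, &buf);
 *     }
 */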

#endif /* __XEN_PUBLIC_HVM_DM_OP_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */