/* $NetBSD: hypervisor.h,v 1.4.4.2 2014/08/20 00:03:25 tls Exp $ */
/* $OpenBSD: hypervisor.h,v 1.14 2011/06/26 17:23:46 kettenis Exp $ */

/*
 * Copyright (c) 2008 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HYPERVISOR_H_
#define _HYPERVISOR_H_

/*
 * UltraSPARC Hypervisor API.
 */

/*
 * API versioning
 */

#ifndef _LOCORE
int64_t hv_api_get_version(uint64_t api_group,
    uint64_t *major_number, uint64_t *minor_number);
#endif

/*
 * Domain services
 */

#ifndef _LOCORE
int64_t hv_mach_desc(paddr_t buffer, psize_t *length);
#endif

/*
 * CPU services
 */

#ifndef _LOCORE
void hv_cpu_yield(void);
int64_t hv_cpu_qconf(uint64_t queue, uint64_t base, uint64_t nentries);
#endif

#define CPU_MONDO_QUEUE 0x3c
#define DEVICE_MONDO_QUEUE 0x3d
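
/*
 * Illustrative sketch (not part of this header's API surface): mondo
 * queues are registered per-CPU with hv_cpu_qconf(), passing the real
 * address of the queue and its entry count.  "cpu_mondo_pa",
 * "dev_mondo_pa" and the entry count of 128 are hypothetical:
 *
 *	if (hv_cpu_qconf(CPU_MONDO_QUEUE, cpu_mondo_pa, 128) != H_EOK ||
 *	    hv_cpu_qconf(DEVICE_MONDO_QUEUE, dev_mondo_pa, 128) != H_EOK)
 *		panic("hv_cpu_qconf");
 */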

#ifndef _LOCORE
int64_t hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
int64_t hv_cpu_myid(uint64_t *cpuid);
#endif

/*
 * MMU services
 */

#ifndef _LOCORE
int64_t hv_mmu_demap_page(vaddr_t vaddr, uint64_t context, uint64_t flags);
int64_t hv_mmu_demap_ctx(uint64_t context, uint64_t flags);
int64_t hv_mmu_demap_all(uint64_t flags);
int64_t hv_mmu_map_perm_addr(vaddr_t vaddr, uint64_t tte, uint64_t flags);
int64_t hv_mmu_unmap_perm_addr(vaddr_t vaddr, uint64_t flags);
int64_t hv_mmu_map_addr(vaddr_t vaddr, uint64_t context, uint64_t tte,
    uint64_t flags);
int64_t hv_mmu_unmap_addr(vaddr_t vaddr, uint64_t context, uint64_t flags);
#endif

#define MAP_DTLB 0x1
#define MAP_ITLB 0x2

#ifndef _LOCORE
struct tsb_desc {
	uint16_t td_idxpgsz;
	uint16_t td_assoc;
	uint32_t td_size;
	uint32_t td_ctxidx;
	uint32_t td_pgsz;
	paddr_t td_pa;
	uint64_t td_reserved;
};

int64_t hv_mmu_tsb_ctx0(uint64_t ntsb, paddr_t tsbptr);
int64_t hv_mmu_tsb_ctxnon0(uint64_t ntsb, paddr_t tsbptr);
#endif
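
/*
 * Illustrative sketch: hv_mmu_tsb_ctx0/ctxnon0 take the number of TSB
 * descriptions and the real address of an array of tsb_desc structures.
 * Assuming a single direct-mapped TSB indexed by 8K pages, with the
 * hypothetical names tsb_entries (entry count), tsb_pa (real address of
 * the TSB) and td_pa (real address of the descriptor below):
 *
 *	struct tsb_desc td = {
 *		.td_idxpgsz = 0, .td_assoc = 1, .td_size = tsb_entries,
 *		.td_ctxidx = 0, .td_pgsz = 0x1, .td_pa = tsb_pa,
 *	};
 *	(void)hv_mmu_tsb_ctx0(1, td_pa);
 */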

/*
 * Cache and memory services
 */

#ifndef _LOCORE
int64_t hv_mem_scrub(paddr_t raddr, psize_t length);
int64_t hv_mem_sync(paddr_t raddr, psize_t length);
#endif

/*
 * Device interrupt services
 */

#ifndef _LOCORE
int64_t hv_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino,
    uint64_t *sysino);
int64_t hv_intr_getenabled(uint64_t sysino, uint64_t *intr_enabled);
int64_t hv_intr_setenabled(uint64_t sysino, uint64_t intr_enabled);
int64_t hv_intr_getstate(uint64_t sysino, uint64_t *intr_state);
int64_t hv_intr_setstate(uint64_t sysino, uint64_t intr_state);
int64_t hv_intr_gettarget(uint64_t sysino, uint64_t *cpuid);
int64_t hv_intr_settarget(uint64_t sysino, uint64_t cpuid);
#endif

#define INTR_DISABLED 0
#define INTR_ENABLED 1

#define INTR_IDLE 0
#define INTR_RECEIVED 1
#define INTR_DELIVERED 2
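
/*
 * Illustrative setup of a sysino-based device interrupt ("dh", "ino"
 * and "target_cpu" are hypothetical values taken from the machine
 * description and the chosen CPU):
 *
 *	uint64_t sysino;
 *
 *	if (hv_intr_devino_to_sysino(dh, ino, &sysino) != H_EOK)
 *		return;
 *	hv_intr_settarget(sysino, target_cpu);
 *	hv_intr_setstate(sysino, INTR_IDLE);
 *	hv_intr_setenabled(sysino, INTR_ENABLED);
 */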

#ifndef _LOCORE
int64_t hv_vintr_getcookie(uint64_t devhandle, uint64_t devino,
    uint64_t *cookie_value);
int64_t hv_vintr_setcookie(uint64_t devhandle, uint64_t devino,
    uint64_t cookie_value);
int64_t hv_vintr_getenabled(uint64_t devhandle, uint64_t devino,
    uint64_t *intr_enabled);
int64_t hv_vintr_setenabled(uint64_t devhandle, uint64_t devino,
    uint64_t intr_enabled);
int64_t hv_vintr_getstate(uint64_t devhandle, uint64_t devino,
    uint64_t *intr_state);
int64_t hv_vintr_setstate(uint64_t devhandle, uint64_t devino,
    uint64_t intr_state);
int64_t hv_vintr_gettarget(uint64_t devhandle, uint64_t devino,
    uint64_t *cpuid);
int64_t hv_vintr_settarget(uint64_t devhandle, uint64_t devino,
    uint64_t cpuid);
#endif

/*
 * Time of day services
 */

#ifndef _LOCORE
int64_t hv_tod_get(uint64_t *tod);
int64_t hv_tod_set(uint64_t tod);
#endif
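
/*
 * The TOD value is expressed in seconds since the POSIX epoch
 * (1970-01-01 00:00:00 UTC).
 */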

/*
 * Console services
 */

#ifndef _LOCORE
int64_t hv_cons_getchar(int64_t *ch);
int64_t hv_cons_putchar(int64_t ch);
int64_t hv_api_putchar(int64_t ch);
#endif

#define CONS_BREAK -1
#define CONS_HUP -2
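
/*
 * Illustrative polled console read: hv_cons_getchar() returns
 * H_EWOULDBLOCK while no character is pending and reports break and
 * hang-up conditions through the CONS_BREAK/CONS_HUP values.
 * handle_char() is a hypothetical consumer:
 *
 *	int64_t ch, err;
 *
 *	do {
 *		err = hv_cons_getchar(&ch);
 *	} while (err == H_EWOULDBLOCK);
 *	if (err == H_EOK && ch != CONS_BREAK && ch != CONS_HUP)
 *		handle_char(ch);
 */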

/*
 * Domain state services
 */

#ifndef _LOCORE
int64_t hv_soft_state_set(uint64_t software_state,
    paddr_t software_description_ptr);
#endif

#define SIS_NORMAL 0x1
#define SIS_TRANSITION 0x2
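
/*
 * hv_soft_state_set() announces the guest's running state: software_state
 * is SIS_NORMAL or SIS_TRANSITION, and software_description_ptr is the
 * real address of a short NUL-terminated ASCII string describing it.
 */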

/*
 * PCI I/O services
 */

#ifndef _LOCORE
int64_t hv_pci_iommu_map(uint64_t devhandle, uint64_t tsbid,
    uint64_t nttes, uint64_t io_attributes, paddr_t io_page_list_p,
    uint64_t *nttes_mapped);
int64_t hv_pci_iommu_demap(uint64_t devhandle, uint64_t tsbid,
    uint64_t nttes, uint64_t *nttes_demapped);
int64_t hv_pci_iommu_getmap(uint64_t devhandle, uint64_t tsbid,
    uint64_t *io_attributes, paddr_t *r_addr);
int64_t hv_pci_iommu_getbypass(uint64_t devhandle, paddr_t r_addr,
    uint64_t io_attributes, uint64_t *io_addr);

int64_t hv_pci_config_get(uint64_t devhandle, uint64_t pci_device,
    uint64_t pci_config_offset, uint64_t size,
    uint64_t *error_flag, uint64_t *data);
int64_t hv_pci_config_put(uint64_t devhandle, uint64_t pci_device,
    uint64_t pci_config_offset, uint64_t size, uint64_t data,
    uint64_t *error_flag);
#endif

#define PCI_MAP_ATTR_READ 0x01 /* From memory */
#define PCI_MAP_ATTR_WRITE 0x02 /* To memory */
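
/*
 * For hv_pci_iommu_map(), io_page_list_p is the real address of a list
 * of page real addresses to map at consecutive entries starting at
 * tsbid, io_attributes is a combination of the PCI_MAP_ATTR_* bits
 * above, and *nttes_mapped reports how many entries were actually
 * mapped, which may be fewer than requested, so callers typically loop.
 */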

/*
 * PCI MSI services
 */

#ifndef _LOCORE
int64_t hv_pci_msiq_conf(uint64_t devhandle, uint64_t msiqid,
    uint64_t r_addr, uint64_t nentries);
int64_t hv_pci_msiq_info(uint64_t devhandle, uint64_t msiqid,
    uint64_t *r_addr, uint64_t *nentries);

int64_t hv_pci_msiq_getvalid(uint64_t devhandle, uint64_t msiqid,
    uint64_t *msiqvalid);
int64_t hv_pci_msiq_setvalid(uint64_t devhandle, uint64_t msiqid,
    uint64_t msiqvalid);
#endif

#define PCI_MSIQ_INVALID 0
#define PCI_MSIQ_VALID 1

#ifndef _LOCORE
int64_t hv_pci_msiq_getstate(uint64_t devhandle, uint64_t msiqid,
    uint64_t *msiqstate);
int64_t hv_pci_msiq_setstate(uint64_t devhandle, uint64_t msiqid,
    uint64_t msiqstate);
#endif

#define PCI_MSIQSTATE_IDLE 0
#define PCI_MSIQSTATE_ERROR 1

#ifndef _LOCORE
int64_t hv_pci_msiq_gethead(uint64_t devhandle, uint64_t msiqid,
    uint64_t *msiqhead);
int64_t hv_pci_msiq_sethead(uint64_t devhandle, uint64_t msiqid,
    uint64_t msiqhead);
int64_t hv_pci_msiq_gettail(uint64_t devhandle, uint64_t msiqid,
    uint64_t *msiqtail);

int64_t hv_pci_msi_getvalid(uint64_t devhandle, uint64_t msinum,
    uint64_t *msivalidstate);
int64_t hv_pci_msi_setvalid(uint64_t devhandle, uint64_t msinum,
    uint64_t msivalidstate);
#endif

#define PCI_MSI_INVALID 0
#define PCI_MSI_VALID 1

#ifndef _LOCORE
int64_t hv_pci_msi_getmsiq(uint64_t devhandle, uint64_t msinum,
    uint64_t *msiqid);
int64_t hv_pci_msi_setmsiq(uint64_t devhandle, uint64_t msinum,
    uint64_t msitype, uint64_t msiqid);

int64_t hv_pci_msi_getstate(uint64_t devhandle, uint64_t msinum,
    uint64_t *msistate);
int64_t hv_pci_msi_setstate(uint64_t devhandle, uint64_t msinum,
    uint64_t msistate);
#endif

#define PCI_MSISTATE_IDLE 0
#define PCI_MSISTATE_DELIVERED 1

#ifndef _LOCORE
int64_t hv_pci_msg_getmsiq(uint64_t devhandle, uint64_t msg,
    uint64_t *msiqid);
int64_t hv_pci_msg_setmsiq(uint64_t devhandle, uint64_t msg,
    uint64_t msiqid);

int64_t hv_pci_msg_getvalid(uint64_t devhandle, uint64_t msg,
    uint64_t *msgvalidstate);
int64_t hv_pci_msg_setvalid(uint64_t devhandle, uint64_t msg,
    uint64_t msgvalidstate);
#endif

#define PCIE_MSG_INVALID 0
#define PCIE_MSG_VALID 1

#define PCIE_PME_MSG 0x18
#define PCIE_PME_ACK_MSG 0x1b
#define PCIE_CORR_MSG 0x30
#define PCIE_NONFATAL_MSG 0x31
#define PCIE_FATAL_MSG 0x32

/*
 * Logical Domain Channel services
 */

#ifndef _LOCORE
int64_t hv_ldc_tx_qconf(uint64_t ldc_id, paddr_t base_raddr,
    uint64_t nentries);
int64_t hv_ldc_tx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
    uint64_t *nentries);
int64_t hv_ldc_tx_get_state(uint64_t ldc_id, uint64_t *head_offset,
    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_tx_set_qtail(uint64_t ldc_id, uint64_t tail_offset);
int64_t hv_ldc_rx_qconf(uint64_t ldc_id, paddr_t base_raddr,
    uint64_t nentries);
int64_t hv_ldc_rx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
    uint64_t *nentries);
int64_t hv_ldc_rx_get_state(uint64_t ldc_id, uint64_t *head_offset,
    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_rx_set_qhead(uint64_t ldc_id, uint64_t head_offset);
#endif

#define LDC_CHANNEL_DOWN 0
#define LDC_CHANNEL_UP 1
#define LDC_CHANNEL_RESET 2

#ifndef _LOCORE
int64_t hv_ldc_set_map_table(uint64_t ldc_id, paddr_t base_raddr,
    uint64_t nentries);
int64_t hv_ldc_get_map_table(uint64_t ldc_id, paddr_t *base_raddr,
    uint64_t *nentries);
int64_t hv_ldc_copy(uint64_t ldc_id, uint64_t flags, uint64_t cookie,
    paddr_t raddr, psize_t length, psize_t *ret_length);
#endif

#define LDC_COPY_IN 0
#define LDC_COPY_OUT 1
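
/*
 * hv_ldc_copy() transfers up to "length" bytes between a local buffer at
 * real address raddr and memory the channel peer has exported under the
 * given cookie; flags selects the direction (LDC_COPY_IN or LDC_COPY_OUT)
 * and *ret_length reports the number of bytes actually copied.
 */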

#ifndef _LOCORE
int64_t hv_ldc_mapin(uint64_t ldc_id, uint64_t cookie, paddr_t *raddr,
    uint64_t *perms);
int64_t hv_ldc_unmap(paddr_t raddr, uint64_t *perms);
#endif

/*
 * Cryptographic services
 */

#ifndef _LOCORE
int64_t hv_rng_get_diag_control(void);
int64_t hv_rng_ctl_read(paddr_t raddr, uint64_t *state, uint64_t *delta);
int64_t hv_rng_ctl_write(paddr_t raddr, uint64_t state, uint64_t timeout,
    uint64_t *delta);
#endif

#define RNG_STATE_UNCONFIGURED 0
#define RNG_STATE_CONFIGURED 1
#define RNG_STATE_HEALTHCHECK 2
#define RNG_STATE_ERROR 3

#ifndef _LOCORE
int64_t hv_rng_data_read_diag(paddr_t raddr, uint64_t size, uint64_t *delta);
int64_t hv_rng_data_read(paddr_t raddr, uint64_t *delta);
#endif

/*
 * Error codes
 */

#define H_EOK 0
#define H_ENOCPU 1
#define H_ENORADDR 2
#define H_ENOINTR 3
#define H_EBADPGSZ 4
#define H_EBADTSB 5
#define H_EINVAL 6
#define H_EBADTRAP 7
#define H_EBADALIGN 8
#define H_EWOULDBLOCK 9
#define H_ENOACCESS 10
#define H_EIO 11
#define H_ECPUERROR 12
#define H_ENOTSUPPORTED 13
#define H_ENOMAP 14
#define H_ETOOMANY 15
#define H_ECHANNEL 16

#endif /* _HYPERVISOR_H_ */