/*	$NetBSD: hypervisor.h,v 1.5 2014/09/24 18:32:10 palle Exp $	*/
/*	$OpenBSD: hypervisor.h,v 1.14 2011/06/26 17:23:46 kettenis Exp $	*/

/*
 * Copyright (c) 2008 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HYPERVISOR_H_
#define _HYPERVISOR_H_

/*
 * UltraSPARC Hypervisor API.
 */

/*
 * FAST_TRAP function numbers
 */

#define FT_MMU_MAP_PERM_ADDR	0x25

/*
 * API versioning
 */

#ifndef _LOCORE
int64_t hv_api_get_version(uint64_t api_group,
	    uint64_t *major_number, uint64_t *minor_number);
#endif
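
/*
 * Illustrative sketch (not part of the original header): querying the
 * version of a hypervisor API group before relying on its calls.  The
 * group number 0x0001 is assumed here to be the core API group from the
 * sun4v specification; this header does not define group constants.
 *
 *	uint64_t major, minor;
 *
 *	if (hv_api_get_version(0x0001, &major, &minor) != H_EOK)
 *		panic("hypervisor: cannot determine core API version");
 *	printf("hypervisor core API %lu.%lu\n", (u_long)major, (u_long)minor);
 */
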
/*
 * Domain services
 */

#ifndef _LOCORE
int64_t hv_mach_desc(paddr_t buffer, psize_t *length);
#endif
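
/*
 * Illustrative sketch (calling convention assumed from the in/out length
 * argument): hv_mach_desc() copies the machine description to the buffer
 * at the given real address and returns its size in *length.  A common
 * pattern is to call it once with a null buffer to learn the required
 * size, allocate, and call it again.  The allocator name is hypothetical.
 *
 *	psize_t len = 0;
 *	paddr_t pa;
 *
 *	hv_mach_desc((paddr_t)0, &len);			// learn required length
 *	pa = alloc_contiguous_real_memory(len);		// hypothetical helper
 *	if (hv_mach_desc(pa, &len) != H_EOK)
 *		panic("hypervisor: cannot read machine description");
 */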

/*
 * CPU services
 */

#ifndef _LOCORE
void hv_cpu_yield(void);
int64_t hv_cpu_qconf(uint64_t queue, uint64_t base, uint64_t nentries);
#endif

#define CPU_MONDO_QUEUE		0x3c
#define DEVICE_MONDO_QUEUE	0x3d

#ifndef _LOCORE
int64_t hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
int64_t hv_cpu_myid(uint64_t *cpuid);
#endif
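
/*
 * Illustrative sketch (assumptions noted): hv_cpu_qconf() installs a
 * per-CPU mondo queue.  The base is assumed to be the real address of a
 * suitably aligned queue and nentries a power of two, per the sun4v
 * specification.  The queue memory itself is set up by the caller.
 *
 *	uint64_t myid;
 *	paddr_t cpu_q_pa, dev_q_pa;	// real addresses of queue memory
 *
 *	hv_cpu_myid(&myid);
 *	if (hv_cpu_qconf(CPU_MONDO_QUEUE, cpu_q_pa, 128) != H_EOK ||
 *	    hv_cpu_qconf(DEVICE_MONDO_QUEUE, dev_q_pa, 128) != H_EOK)
 *		panic("hypervisor: mondo queue setup failed for cpu %lu",
 *		    (u_long)myid);
 */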

/*
 * MMU services
 */

#ifndef _LOCORE
int64_t hv_mmu_demap_page(vaddr_t vaddr, uint64_t context, uint64_t flags);
int64_t hv_mmu_demap_ctx(uint64_t context, uint64_t flags);
int64_t hv_mmu_demap_all(uint64_t flags);
int64_t hv_mmu_map_perm_addr(vaddr_t vaddr, uint64_t tte, uint64_t flags);
int64_t hv_mmu_unmap_perm_addr(vaddr_t vaddr, uint64_t flags);
int64_t hv_mmu_map_addr(vaddr_t vaddr, uint64_t context, uint64_t tte,
	    uint64_t flags);
int64_t hv_mmu_unmap_addr(vaddr_t vaddr, uint64_t context, uint64_t flags);
#endif

#define MAP_DTLB	0x1
#define MAP_ITLB	0x2
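
/*
 * Illustrative sketch: hv_mmu_map_perm_addr() is assumed to install a
 * permanent translation (typically for kernel text/data) that survives
 * demap operations; the flags select which TLBs receive the mapping.
 * The TTE value and virtual address below are placeholders built by the
 * caller's pmap code.
 *
 *	uint64_t tte = kernel_text_tte;		// hypothetical, built by pmap
 *
 *	if (hv_mmu_map_perm_addr(ktext_va, tte, MAP_ITLB|MAP_DTLB) != H_EOK)
 *		panic("hypervisor: cannot lock kernel text mapping");
 */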

#ifndef _LOCORE
struct tsb_desc {
	uint16_t	td_idxpgsz;
	uint16_t	td_assoc;
	uint32_t	td_size;
	uint32_t	td_ctxidx;
	uint32_t	td_pgsz;
	paddr_t		td_pa;
	uint64_t	td_reserved;
};

int64_t hv_mmu_tsb_ctx0(uint64_t ntsb, paddr_t tsbptr);
int64_t hv_mmu_tsb_ctxnon0(uint64_t ntsb, paddr_t tsbptr);
#endif
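
/*
 * Illustrative sketch (field encodings assumed from the sun4v TSB
 * descriptor): hv_mmu_tsb_ctx0() takes the number of descriptors and the
 * real address of a tsb_desc array describing the context-0 TSBs.
 *
 *	struct tsb_desc td;
 *
 *	memset(&td, 0, sizeof(td));
 *	td.td_assoc = 1;		// direct-mapped TSB (assumed encoding)
 *	td.td_size = 512;		// number of TSB entries
 *	td.td_pa = tsb_pa;		// real address of the TSB memory
 *	// copy td to real memory at desc_pa (hypothetical address), then:
 *	if (hv_mmu_tsb_ctx0(1, desc_pa) != H_EOK)
 *		panic("hypervisor: TSB configuration failed");
 */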

/*
 * Cache and memory services
 */

#ifndef _LOCORE
int64_t hv_mem_scrub(paddr_t raddr, psize_t length);
int64_t hv_mem_sync(paddr_t raddr, psize_t length);
#endif

/*
 * Device interrupt services
 */

#ifndef _LOCORE
int64_t hv_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino,
	    uint64_t *sysino);
int64_t hv_intr_getenabled(uint64_t sysino, uint64_t *intr_enabled);
int64_t hv_intr_setenabled(uint64_t sysino, uint64_t intr_enabled);
int64_t hv_intr_getstate(uint64_t sysino, uint64_t *intr_state);
int64_t hv_intr_setstate(uint64_t sysino, uint64_t intr_state);
int64_t hv_intr_gettarget(uint64_t sysino, uint64_t *cpuid);
int64_t hv_intr_settarget(uint64_t sysino, uint64_t cpuid);
#endif

#define INTR_DISABLED	0
#define INTR_ENABLED	1

#define INTR_IDLE	0
#define INTR_RECEIVED	1
#define INTR_DELIVERED	2
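
/*
 * Illustrative sketch: a typical sequence for bringing up a device
 * interrupt is to translate the device's INO into a system interrupt
 * number, bind it to a CPU, clear any stale state and then enable it.
 * devhandle, devino and cpuid stand for values obtained elsewhere.
 *
 *	uint64_t sysino;
 *
 *	if (hv_intr_devino_to_sysino(devhandle, devino, &sysino) != H_EOK)
 *		return;
 *	hv_intr_settarget(sysino, cpuid);
 *	hv_intr_setstate(sysino, INTR_IDLE);
 *	hv_intr_setenabled(sysino, INTR_ENABLED);
 */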

#ifndef _LOCORE
int64_t hv_vintr_getcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t *cookie_value);
int64_t hv_vintr_setcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t cookie_value);
int64_t hv_vintr_getenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_enabled);
int64_t hv_vintr_setenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_enabled);
int64_t hv_vintr_getstate(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_state);
int64_t hv_vintr_setstate(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_state);
int64_t hv_vintr_gettarget(uint64_t devhandle, uint64_t devino,
	    uint64_t *cpuid);
int64_t hv_vintr_settarget(uint64_t devhandle, uint64_t devino,
	    uint64_t cpuid);
#endif

/*
 * Time of day services
 */

#ifndef _LOCORE
int64_t hv_tod_get(uint64_t *tod);
int64_t hv_tod_set(uint64_t tod);
#endif

/*
 * Console services
 */

#ifndef _LOCORE
int64_t hv_cons_getchar(int64_t *ch);
int64_t hv_cons_putchar(int64_t ch);
int64_t hv_api_putchar(int64_t ch);
#endif

#define CONS_BREAK	-1
#define CONS_HUP	-2
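
/*
 * Illustrative sketch: the console calls are assumed to be non-blocking,
 * returning H_EWOULDBLOCK when no character is pending or the output
 * channel is busy, so callers poll.  CONS_BREAK and CONS_HUP are special
 * values delivered through hv_cons_getchar().
 *
 *	int64_t c;
 *
 *	while (hv_cons_putchar('!') == H_EWOULDBLOCK)
 *		;				// busy-wait until accepted
 *
 *	if (hv_cons_getchar(&c) == H_EOK && c != CONS_BREAK && c != CONS_HUP)
 *		handle_input((int)c);		// hypothetical consumer
 */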

/*
 * Domain state services
 */

#ifndef _LOCORE
int64_t hv_soft_state_set(uint64_t software_state,
	    paddr_t software_description_ptr);
#endif

#define SIS_NORMAL	0x1
#define SIS_TRANSITION	0x2
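
/*
 * Illustrative sketch (descriptor format assumed): hv_soft_state_set()
 * announces the guest's state to the domain manager; the second argument
 * is taken to be the real address of a short NUL-terminated description
 * string prepared by the caller.
 *
 *	// desc_pa is the real address of a buffer such as "Running".
 *	hv_soft_state_set(SIS_NORMAL, desc_pa);
 */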

/*
 * PCI I/O services
 */

#ifndef _LOCORE
int64_t hv_pci_iommu_map(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t io_attributes, paddr_t io_page_list_p,
	    uint64_t *nttes_mapped);
int64_t hv_pci_iommu_demap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t *nttes_demapped);
int64_t hv_pci_iommu_getmap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t *io_attributes, paddr_t *r_addr);
int64_t hv_pci_iommu_getbypass(uint64_t devhandle, paddr_t r_addr,
	    uint64_t io_attributes, uint64_t *io_addr);

int64_t hv_pci_config_get(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size,
	    uint64_t *error_flag, uint64_t *data);
int64_t hv_pci_config_put(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size, uint64_t data,
	    uint64_t *error_flag);
#endif

#define PCI_MAP_ATTR_READ	0x01	/* From memory */
#define PCI_MAP_ATTR_WRITE	0x02	/* To memory */
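
/*
 * Illustrative sketch (page list format assumed from the sun4v spec):
 * hv_pci_iommu_map() maps nttes pages whose real addresses are listed in
 * an array located at io_page_list_p (itself a real address), with the
 * given DMA attributes, and reports how many entries were mapped.
 *
 *	uint64_t mapped;
 *
 *	// page_list_pa points to an array of page-aligned real addresses.
 *	if (hv_pci_iommu_map(devhandle, tsbid, npages,
 *	    PCI_MAP_ATTR_READ|PCI_MAP_ATTR_WRITE, page_list_pa,
 *	    &mapped) != H_EOK || mapped != npages)
 *		printf("iommu map failed\n");
 */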

/*
 * PCI MSI services
 */

#ifndef _LOCORE
int64_t hv_pci_msiq_conf(uint64_t devhandle, uint64_t msiqid,
	    uint64_t r_addr, uint64_t nentries);
int64_t hv_pci_msiq_info(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *r_addr, uint64_t *nentries);

int64_t hv_pci_msiq_getvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqvalid);
int64_t hv_pci_msiq_setvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqvalid);
#endif

#define PCI_MSIQ_INVALID	0
#define PCI_MSIQ_VALID		1

#ifndef _LOCORE
int64_t hv_pci_msiq_getstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqstate);
int64_t hv_pci_msiq_setstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqstate);
#endif

#define PCI_MSIQSTATE_IDLE	0
#define PCI_MSIQSTATE_ERROR	1

#ifndef _LOCORE
int64_t hv_pci_msiq_gethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqhead);
int64_t hv_pci_msiq_sethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqhead);
int64_t hv_pci_msiq_gettail(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqtail);

int64_t hv_pci_msi_getvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msivalidstate);
int64_t hv_pci_msi_setvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t msivalidstate);
#endif

#define PCI_MSI_INVALID		0
#define PCI_MSI_VALID		1

#ifndef _LOCORE
int64_t hv_pci_msi_getmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msiqid);
int64_t hv_pci_msi_setmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t msitype, uint64_t msiqid);

int64_t hv_pci_msi_getstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msistate);
int64_t hv_pci_msi_setstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t msistate);
#endif

#define PCI_MSISTATE_IDLE	0
#define PCI_MSISTATE_DELIVERED	1

#ifndef _LOCORE
int64_t hv_pci_msg_getmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t *msiqid);
int64_t hv_pci_msg_setmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t msiqid);

int64_t hv_pci_msg_getvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t *msgvalidstate);
int64_t hv_pci_msg_setvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t msgvalidstate);
#endif

#define PCIE_MSG_INVALID	0
#define PCIE_MSG_VALID		1

#define PCIE_PME_MSG		0x18
#define PCIE_PME_ACK_MSG	0x1b
#define PCIE_CORR_MSG		0x30
#define PCIE_NONFATAL_MSG	0x31
#define PCIE_FATAL_MSG		0x32

/*
 * Logical Domain Channel services
 */

#ifndef _LOCORE
int64_t hv_ldc_tx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_tx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_tx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_tx_set_qtail(uint64_t ldc_id, uint64_t tail_offset);
int64_t hv_ldc_rx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_rx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_rx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_rx_set_qhead(uint64_t ldc_id, uint64_t head_offset);
#endif

#define LDC_CHANNEL_DOWN	0
#define LDC_CHANNEL_UP		1
#define LDC_CHANNEL_RESET	2
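
/*
 * Illustrative sketch (queue geometry assumed): LDC transmit and receive
 * queues are arrays of fixed-size entries in real memory; after a queue
 * is configured, the head/tail offsets and the channel state are read
 * back with the get_state calls.
 *
 *	uint64_t head, tail, state;
 *
 *	if (hv_ldc_tx_qconf(ldc_id, txq_pa, nentries) != H_EOK)
 *		return;
 *	hv_ldc_tx_get_state(ldc_id, &head, &tail, &state);
 *	if (state != LDC_CHANNEL_UP)
 *		printf("ldc%llu: peer not up yet\n",
 *		    (unsigned long long)ldc_id);
 */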

#ifndef _LOCORE
int64_t hv_ldc_set_map_table(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_get_map_table(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_copy(uint64_t ldc_id, uint64_t flags, uint64_t cookie,
	    paddr_t raddr, psize_t length, psize_t *ret_length);
#endif

#define LDC_COPY_IN	0
#define LDC_COPY_OUT	1
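
/*
 * Illustrative sketch (semantics assumed): hv_ldc_copy() copies data
 * between local real memory and a buffer exported by the peer domain and
 * identified by a cookie; LDC_COPY_IN copies from the peer into raddr,
 * LDC_COPY_OUT copies from raddr to the peer.
 *
 *	psize_t done;
 *
 *	if (hv_ldc_copy(ldc_id, LDC_COPY_IN, cookie, buf_pa, len,
 *	    &done) != H_EOK || done != len)
 *		printf("ldc copy incomplete\n");
 */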

#ifndef _LOCORE
int64_t hv_ldc_mapin(uint64_t ldc_id, uint64_t cookie, paddr_t *raddr,
	    uint64_t *perms);
int64_t hv_ldc_unmap(paddr_t raddr, uint64_t *perms);
#endif

/*
 * Cryptographic services
 */

#ifndef _LOCORE
int64_t hv_rng_get_diag_control(void);
int64_t hv_rng_ctl_read(paddr_t raddr, uint64_t *state, uint64_t *delta);
int64_t hv_rng_ctl_write(paddr_t raddr, uint64_t state, uint64_t timeout,
	    uint64_t *delta);
#endif

#define RNG_STATE_UNCONFIGURED	0
#define RNG_STATE_CONFIGURED	1
#define RNG_STATE_HEALTHCHECK	2
#define RNG_STATE_ERROR		3

#ifndef _LOCORE
int64_t hv_rng_data_read_diag(paddr_t raddr, uint64_t size, uint64_t *delta);
int64_t hv_rng_data_read(paddr_t raddr, uint64_t *delta);
#endif

/*
 * Error codes
 */

#define H_EOK		0
#define H_ENOCPU	1
#define H_ENORADDR	2
#define H_ENOINTR	3
#define H_EBADPGSZ	4
#define H_EBADTSB	5
#define H_EINVAL	6
#define H_EBADTRAP	7
#define H_EBADALIGN	8
#define H_EWOULDBLOCK	9
#define H_ENOACCESS	10
#define H_EIO		11
#define H_ECPUERROR	12
#define H_ENOTSUPPORTED	13
#define H_ENOMAP	14
#define H_ETOOMANY	15
#define H_ECHANNEL	16

#endif /* _HYPERVISOR_H_ */