/*	$NetBSD: hypervisor.h,v 1.5.2.1 2017/08/28 17:51:53 skrll Exp $	*/
/*	$OpenBSD: hypervisor.h,v 1.14 2011/06/26 17:23:46 kettenis Exp $	*/
3 1.1 palle
/*
 * Copyright (c) 2008 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
19 1.1 palle
#ifndef _HYPERVISOR_H_
#define _HYPERVISOR_H_

/*
 * UltraSPARC Hypervisor API.
 *
 * C prototypes for the sun4v hypervisor call wrappers.  The actual
 * wrappers are implemented elsewhere (presumably in assembly); every
 * int64_t-returning call yields one of the H_* status codes defined
 * at the end of this file.  Prototypes are hidden from assembly
 * sources via the _LOCORE guards below.
 */

/*
 * FAST_TRAP function numbers
 */

/* Function number for mmu_map_perm_addr (cf. hv_mmu_map_perm_addr below). */
#define FT_MMU_MAP_PERM_ADDR	0x25

/*
 * API versioning
 */
36 1.1 palle
#ifndef _LOCORE
/*
 * Negotiate/query the version of a hypervisor API group.  The major
 * and minor numbers are returned through the pointer arguments.
 */
int64_t	hv_api_get_version(uint64_t api_group,
	    uint64_t *major_number, uint64_t *minor_number);
#endif
/*
 * Domain services
 */

#ifndef _LOCORE
/*
 * Retrieve the machine description into `buffer'.
 * NOTE(review): `length' is presumably in/out (buffer size on entry,
 * MD size on return) — confirm against the sun4v hypervisor spec.
 */
int64_t	hv_mach_desc(paddr_t buffer, psize_t *length);
#endif
48 1.1 palle
/*
 * CPU services
 */

#ifndef _LOCORE
/* Yield the current virtual CPU to the hypervisor. */
void	hv_cpu_yield(void);
/* Configure an interrupt queue; `queue' is one of the *_QUEUE ids below. */
int64_t	hv_cpu_qconf(uint64_t queue, uint64_t base, uint64_t nentries);
#endif

/* Queue identifiers for hv_cpu_qconf(). */
#define CPU_MONDO_QUEUE		0x3c
#define DEVICE_MONDO_QUEUE	0x3d

#ifndef _LOCORE
/* Send a mondo interrupt carrying `data' to the `ncpus' CPUs in `cpulist'. */
int64_t	hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
/* Return the virtual CPU id of the calling CPU in *cpuid. */
int64_t	hv_cpu_myid(uint64_t *cpuid);
#endif
65 1.1 palle
/*
 * MMU services
 */

#ifndef _LOCORE
/* Demap a single page, a whole context, or everything.  `flags' is
 * a combination of the MAP_DTLB/MAP_ITLB bits below. */
int64_t	hv_mmu_demap_page(vaddr_t vaddr, uint64_t context, uint64_t flags);
int64_t	hv_mmu_demap_ctx(uint64_t context, uint64_t flags);
int64_t	hv_mmu_demap_all(uint64_t flags);
/* Install/remove a permanent (non-demappable) mapping. */
int64_t	hv_mmu_map_perm_addr(vaddr_t vaddr, uint64_t tte, uint64_t flags);
int64_t	hv_mmu_unmap_perm_addr(vaddr_t vaddr, uint64_t flags);
/* Install/remove an ordinary mapping in the given context. */
int64_t	hv_mmu_map_addr(vaddr_t vaddr, uint64_t context, uint64_t tte,
	    uint64_t flags);
int64_t	hv_mmu_unmap_addr(vaddr_t vaddr, uint64_t context, uint64_t flags);
#endif

/* TLB selection flags for the hv_mmu_*() calls above. */
#define MAP_DTLB	0x1
#define MAP_ITLB	0x2

#ifndef _LOCORE
/* TSB descriptor, passed (by real address) to hv_mmu_tsb_ctx0()/ctxnon0(). */
struct tsb_desc {
	uint16_t	td_idxpgsz;	/* page size used to index the TSB */
	uint16_t	td_assoc;	/* associativity */
	uint32_t	td_size;	/* TSB size (entries) */
	uint32_t	td_ctxidx;	/* context index */
	uint32_t	td_pgsz;	/* page size bitmask */
	paddr_t		td_pa;		/* TSB real address */
	uint64_t	td_reserved;
};

/* MMU fault status area layout. */
struct mmufsa {
	uint64_t	ift;		/* instruction fault type */
	uint64_t	ifa;		/* instruction fault address */
	uint64_t	ifc;		/* instruction fault context */
	uint64_t	reserved1[5];	/* reserved */
	uint64_t	dft;		/* data fault type */
	uint64_t	dfa;		/* data fault address */
	uint64_t	dfc;		/* data fault context */
	uint64_t	reserved2[5];	/* reserved */
};

/* Configure the TSBs for context 0 / non-zero contexts; `tsbptr' points
 * to an array of `ntsb' struct tsb_desc. */
int64_t	hv_mmu_tsb_ctx0(uint64_t ntsb, paddr_t tsbptr);
int64_t	hv_mmu_tsb_ctxnon0(uint64_t ntsb, paddr_t tsbptr);
#endif
109 1.1 palle
/*
 * Cache and memory services
 */

#ifndef _LOCORE
/* Scrub / flush-to-coherence the real address range [raddr, raddr+length). */
int64_t	hv_mem_scrub(paddr_t raddr, psize_t length);
int64_t	hv_mem_sync(paddr_t raddr, psize_t length);
#endif
118 1.1 palle
/*
 * Device interrupt services
 */

#ifndef _LOCORE
/* Convert a device interrupt number to a system interrupt number. */
int64_t	hv_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino,
	    uint64_t *sysino);
/* Get/set the enabled flag (INTR_DISABLED/INTR_ENABLED). */
int64_t	hv_intr_getenabled(uint64_t sysino, uint64_t *intr_enabled);
int64_t	hv_intr_setenabled(uint64_t sysino, uint64_t intr_enabled);
/* Get/set the delivery state (INTR_IDLE/RECEIVED/DELIVERED). */
int64_t	hv_intr_getstate(uint64_t sysino, uint64_t *intr_state);
int64_t	hv_intr_setstate(uint64_t sysino, uint64_t intr_state);
/* Get/set the CPU an interrupt is targeted at. */
int64_t	hv_intr_gettarget(uint64_t sysino, uint64_t *cpuid);
int64_t	hv_intr_settarget(uint64_t sysino, uint64_t cpuid);
#endif

/* Values for hv_intr_{get,set}enabled() / hv_vintr_{get,set}enabled(). */
#define INTR_DISABLED	0
#define INTR_ENABLED	1

/* Values for hv_intr_{get,set}state() / hv_vintr_{get,set}state(). */
#define INTR_IDLE	0
#define INTR_RECEIVED	1
#define INTR_DELIVERED	2

#ifndef _LOCORE
/* Virtual interrupt variants, addressed by (devhandle, devino). */
int64_t	hv_vintr_getcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t *cookie_value);
int64_t	hv_vintr_setcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t cookie_value);
int64_t	hv_vintr_getenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_enabled);
int64_t	hv_vintr_setenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_enabled);
int64_t	hv_vintr_getstate(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_state);
int64_t	hv_vintr_setstate(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_state);
int64_t	hv_vintr_gettarget(uint64_t devhandle, uint64_t devino,
	    uint64_t *cpuid);
int64_t	hv_vintr_settarget(uint64_t devhandle, uint64_t devino,
	    uint64_t cpuid);
#endif
159 1.1 palle
/*
 * Time of day services
 */

#ifndef _LOCORE
/* Get/set the time of day counter. */
int64_t	hv_tod_get(uint64_t *tod);
int64_t	hv_tod_set(uint64_t tod);
#endif

/*
 * Console services
 */

#ifndef _LOCORE
/* Read one character; *ch may also be one of the CONS_* events below. */
int64_t	hv_cons_getchar(int64_t *ch);
/* Write one character to the console. */
int64_t	hv_cons_putchar(int64_t ch);
int64_t	hv_api_putchar(int64_t ch);
#endif

/* Out-of-band console events returned by hv_cons_getchar(). */
#define CONS_BREAK	-1
#define CONS_HUP	-2
181 1.1 palle
/*
 * Domain state services
 */

#ifndef _LOCORE
/* Report the guest software state (SIS_* below) to the hypervisor,
 * with an optional human-readable description string. */
int64_t	hv_soft_state_set(uint64_t software_state,
	    paddr_t software_description_ptr);
#endif

/* software_state values for hv_soft_state_set(). */
#define SIS_NORMAL	0x1
#define SIS_TRANSITION	0x2
193 1.1 palle
/*
 * PCI I/O services
 */

#ifndef _LOCORE
/* Map `nttes' pages listed at `io_page_list_p' into the IOMMU starting
 * at `tsbid'; the number actually mapped is returned in *nttes_mapped. */
int64_t	hv_pci_iommu_map(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t io_attributes, paddr_t io_page_list_p,
	    uint64_t *nttes_mapped);
/* Remove `nttes' IOMMU mappings starting at `tsbid'. */
int64_t	hv_pci_iommu_demap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t *nttes_demapped);
/* Look up the mapping at `tsbid'. */
int64_t	hv_pci_iommu_getmap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t *io_attributes, paddr_t *r_addr);
/* Translate a real address into an IOMMU-bypass I/O address. */
int64_t	hv_pci_iommu_getbypass(uint64_t devhandle, paddr_t r_addr,
	    uint64_t io_attributes, uint64_t *io_addr);

/* PCI configuration space read/write; *error_flag reports access errors. */
int64_t	hv_pci_config_get(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size,
	    uint64_t *error_flag, uint64_t *data);
int64_t	hv_pci_config_put(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size, uint64_t data,
	    uint64_t *error_flag);
#endif

/* io_attributes bits for the IOMMU calls above. */
#define PCI_MAP_ATTR_READ	0x01	/* From memory */
#define PCI_MAP_ATTR_WRITE	0x02	/* To memory */
219 1.1 palle
/*
 * PCI MSI services
 */

#ifndef _LOCORE
/* Configure an MSI event queue of `nentries' entries at `r_addr'. */
int64_t	hv_pci_msiq_conf(uint64_t devhandle, uint64_t msiqid,
	    uint64_t r_addr, uint64_t nentries);
/* Query the configuration of an MSI event queue. */
int64_t	hv_pci_msiq_info(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *r_addr, uint64_t *nentries);

/* Get/set the valid flag of an MSI event queue (PCI_MSIQ_* below). */
int64_t	hv_pci_msiq_getvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqvalid);
int64_t	hv_pci_msiq_setvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqvalid);
#endif

/* msiqvalid values. */
#define PCI_MSIQ_INVALID	0
#define PCI_MSIQ_VALID		1

#ifndef _LOCORE
/* Get/set the state of an MSI event queue (PCI_MSIQSTATE_* below). */
int64_t	hv_pci_msiq_getstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqstate);
int64_t	hv_pci_msiq_setstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqstate);
#endif

/* msiqstate values. */
#define PCI_MSIQSTATE_IDLE	0
#define PCI_MSIQSTATE_ERROR	1

#ifndef _LOCORE
/* Get/set the head pointer and get the tail pointer of an MSI queue. */
int64_t	hv_pci_msiq_gethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqhead);
int64_t	hv_pci_msiq_sethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqhead);
int64_t	hv_pci_msiq_gettail(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqtail);

/* Get/set the valid flag of an individual MSI (PCI_MSI_* below). */
int64_t	hv_pci_msi_getvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msivalidstate);
int64_t	hv_pci_msi_setvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t msivalidstate);
#endif

/* msivalidstate values. */
#define PCI_MSI_INVALID		0
#define PCI_MSI_VALID		1

#ifndef _LOCORE
/* Get/set the event queue an MSI is bound to. */
int64_t	hv_pci_msi_getmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msiqid);
int64_t	hv_pci_msi_setmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t msitype, uint64_t msiqid);

/* Get/set the state of an individual MSI (PCI_MSISTATE_* below). */
int64_t	hv_pci_msi_getstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msistate);
int64_t	hv_pci_msi_setstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t msistate);
#endif

/* msistate values. */
#define PCI_MSISTATE_IDLE	0
#define PCI_MSISTATE_DELIVERED	1

#ifndef _LOCORE
/* Get/set the event queue a PCIe message (PCIE_*_MSG below) is bound to. */
int64_t	hv_pci_msg_getmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t *msiqid);
int64_t	hv_pci_msg_setmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t msiqid);

/* Get/set the valid flag of a PCIe message (PCIE_MSG_* below). */
int64_t	hv_pci_msg_getvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t *msgvalidstate);
int64_t	hv_pci_msg_setvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t msgvalidstate);
#endif

/* msgvalidstate values. */
#define PCIE_MSG_INVALID	0
#define PCIE_MSG_VALID		1

/* PCIe message type codes. */
#define PCIE_PME_MSG		0x18
#define PCIE_PME_ACK_MSG	0x1b
#define PCIE_CORR_MSG		0x30
#define PCIE_NONFATAL_MSG	0x31
#define PCIE_FATAL_MSG		0x32
301 1.1 palle
/*
 * Logical Domain Channel services
 */

#ifndef _LOCORE
/* Configure / query the transmit queue of an LDC. */
int64_t	hv_ldc_tx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t	hv_ldc_tx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
/* Get head/tail offsets and channel state (LDC_CHANNEL_* below). */
int64_t	hv_ldc_tx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t	hv_ldc_tx_set_qtail(uint64_t ldc_id, uint64_t tail_offset);
/* Receive-side counterparts of the calls above. */
int64_t	hv_ldc_rx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t	hv_ldc_rx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t	hv_ldc_rx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t	hv_ldc_rx_set_qhead(uint64_t ldc_id, uint64_t head_offset);
#endif

/* channel_state values. */
#define LDC_CHANNEL_DOWN	0
#define LDC_CHANNEL_UP		1
#define LDC_CHANNEL_RESET	2

#ifndef _LOCORE
/* Set/get the shared-memory export map table for an LDC. */
int64_t	hv_ldc_set_map_table(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t	hv_ldc_get_map_table(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
/* Copy data across an LDC; `flags' is LDC_COPY_IN or LDC_COPY_OUT. */
int64_t	hv_ldc_copy(uint64_t ldc_id, uint64_t flags, uint64_t cookie,
	    paddr_t raddr, psize_t length, psize_t *ret_length);
#endif

/* Direction flags for hv_ldc_copy(). */
#define LDC_COPY_IN	0
#define LDC_COPY_OUT	1

#ifndef _LOCORE
/* Map in / unmap a page exported by the LDC peer. */
int64_t	hv_ldc_mapin(uint64_t ldc_id, uint64_t cookie, paddr_t *raddr,
	    uint64_t *perms);
int64_t	hv_ldc_unmap(paddr_t raddr, uint64_t *perms);
#endif
344 1.1 palle
/*
 * Cryptographic services
 */

#ifndef _LOCORE
/* Obtain diagnostic control of the hardware RNG. */
int64_t	hv_rng_get_diag_control(void);
/* Read/write the RNG control registers; state is RNG_STATE_* below. */
int64_t	hv_rng_ctl_read(paddr_t raddr, uint64_t *state, uint64_t *delta);
int64_t	hv_rng_ctl_write(paddr_t raddr, uint64_t state, uint64_t timeout,
	    uint64_t *delta);
#endif

/* RNG state values. */
#define RNG_STATE_UNCONFIGURED	0
#define RNG_STATE_CONFIGURED	1
#define RNG_STATE_HEALTHCHECK	2
#define RNG_STATE_ERROR		3

#ifndef _LOCORE
/* Read random data into the buffer at real address `raddr'. */
int64_t	hv_rng_data_read_diag(paddr_t raddr, uint64_t size, uint64_t *delta);
int64_t	hv_rng_data_read(paddr_t raddr, uint64_t *delta);
#endif
365 1.1 palle
/*
 * Error codes
 *
 * Status values returned by the hv_*() calls declared above.
 * H_EOK (0) indicates success; all other values are failures.
 */

#define H_EOK			0
#define H_ENOCPU		1
#define H_ENORADDR		2
#define H_ENOINTR		3
#define H_EBADPGSZ		4
#define H_EBADTSB		5
#define H_EINVAL		6
#define H_EBADTRAP		7
#define H_EBADALIGN		8
#define H_EWOULDBLOCK		9
#define H_ENOACCESS		10
#define H_EIO			11
#define H_ECPUERROR		12
#define H_ENOTSUPPORTED		13
#define H_ENOMAP		14
#define H_ETOOMANY		15
#define H_ECHANNEL		16

#endif /* _HYPERVISOR_H_ */
389