/* $NetBSD: xen_ipi.c,v 1.42 2023/11/06 17:01:07 rin Exp $ */

/*-
 * Copyright (c) 2011, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cherry G. Mathew <cherry (at) zyx.in>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID macro */

/*
 * Based on: x86/ipi.c
 */

__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.42 2023/11/06 17:01:07 rin Exp $");

#include "opt_ddb.h"

#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/device.h>
#include <sys/xcall.h>
#include <sys/ipi.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <x86/fpu.h>
#include <machine/frame.h>
#include <machine/segments.h>

#include <xen/evtchn.h>
#include <xen/intr.h>
#include <xen/intrdefs.h>
#include <xen/hypervisor.h>
#include <xen/include/public/vcpu.h>

#ifdef DDB
extern void ddb_ipi(struct trapframe);
static void xen_ipi_ddb(struct cpu_info *, struct intrframe *);
#endif

static void xen_ipi_halt(struct cpu_info *, struct intrframe *);
static void xen_ipi_synch_fpu(struct cpu_info *, struct intrframe *);
static void xen_ipi_xcall(struct cpu_info *, struct intrframe *);
static void xen_ipi_hvcb(struct cpu_info *, struct intrframe *);
static void xen_ipi_generic(struct cpu_info *, struct intrframe *);
static void xen_ipi_ast(struct cpu_info *, struct intrframe *);
static void xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe *);

static void (*xen_ipifunc[XEN_NIPIS])(struct cpu_info *, struct intrframe *) =
{	/* In order of priority (see: xen/include/intrdefs.h */
	xen_ipi_halt,
	xen_ipi_synch_fpu,
#ifdef DDB
	xen_ipi_ddb,
#else
	NULL,
#endif
	xen_ipi_xcall,
	xen_ipi_hvcb,
	xen_ipi_generic,
	xen_ipi_ast,
	xen_ipi_kpreempt
};

static int
xen_ipi_handler(void *arg, struct intrframe *regs)
{
	uint32_t pending;
	int bit;
	struct cpu_info *ci;

	ci = curcpu();

	KASSERT(ci == arg);
	pending = atomic_swap_32(&ci->ci_ipis, 0);

	KDASSERT((pending >> XEN_NIPIS) == 0);
	while ((bit = ffs(pending)) != 0) {
		bit--;
		pending &= ~(1 << bit);
		ci->ci_ipi_events[bit].ev_count++;
		if (xen_ipifunc[bit] != NULL) {
			(*xen_ipifunc[bit])(ci, regs);
		} else {
			panic("xen_ipifunc[%d] unsupported!\n", bit);
			/* NOTREACHED */
		}
	}

	return 0;
}

/* Must be called once for every cpu that expects to send/recv ipis */
void
xen_ipi_init(void)
{
	cpuid_t vcpu;
	evtchn_port_t evtchn;
	struct cpu_info *ci;
	char intr_xname[INTRDEVNAMEBUF];

	ci = curcpu();

	vcpu = ci->ci_vcpuid;
	KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS);

	evtchn = bind_vcpu_to_evtch(vcpu);
	ci->ci_ipi_evtchn = evtchn;

	KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS);

	snprintf(intr_xname, sizeof(intr_xname), "%s ipi",
	    device_xname(ci->ci_dev));

	if (event_set_handler(evtchn,
	    __FPTRCAST(int (*)(void *), xen_ipi_handler), ci, IPL_HIGH,
	    NULL, intr_xname, true, ci) == NULL) {
		panic("%s: unable to register ipi handler\n", __func__);
		/* NOTREACHED */
	}

	hypervisor_unmask_event(evtchn);
}

static inline bool __diagused
valid_ipimask(uint32_t ipimask)
{
	uint32_t masks = XEN_IPI_GENERIC | XEN_IPI_HVCB | XEN_IPI_XCALL |
	    XEN_IPI_DDB | XEN_IPI_SYNCH_FPU |
	    XEN_IPI_HALT | XEN_IPI_AST | XEN_IPI_KPREEMPT;

	if (ipimask & ~masks) {
		return false;
	} else {
		return true;
	}

}

int
xen_send_ipi(struct cpu_info *ci, uint32_t ipimask)
{
	evtchn_port_t evtchn;

	KASSERT(ci != NULL && ci != curcpu());

	if ((ci->ci_flags & CPUF_RUNNING) == 0) {
		return ENOENT;
	}

	evtchn = ci->ci_ipi_evtchn;

	KASSERTMSG(valid_ipimask(ipimask) == true,
	    "xen_send_ipi() called with invalid ipimask\n");

	atomic_or_32(&ci->ci_ipis, ipimask);
	hypervisor_notify_via_evtchn(evtchn);

	return 0;
}

void
xen_broadcast_ipi(uint32_t ipimask)
{
	struct cpu_info *ci, *self = curcpu();
	CPU_INFO_ITERATOR cii;

	KASSERTMSG(valid_ipimask(ipimask) == true,
	    "xen_broadcast_ipi() called with invalid ipimask\n");

	/*
	 * XXX-cherry: there's an implicit broadcast sending order
	 * which I dislike. Randomise this ? :-)
	 */

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci == NULL)
			continue;
		if (ci == self)
			continue;
		if (ci->ci_data.cpu_idlelwp == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_RUNNING)) {
			if (0 != xen_send_ipi(ci, ipimask)) {
				panic("xen_ipi of %x from %s to %s failed\n",
				    ipimask, cpu_name(curcpu()),
				    cpu_name(ci));
			}
		}
	}
}

/* MD wrapper for the xcall(9) callback. */

static void
xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci == curcpu());
	KASSERT(ci != NULL);
	if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL)) {
		panic("%s shutdown failed.\n", device_xname(ci->ci_dev));
	}

}

static void
xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	panic("%s: impossible", __func__);
}

#ifdef DDB
static void
xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

#ifdef __x86_64__
	ddb_ipi(intrf->if_tf);
#else
	struct trapframe tf;
	tf.tf_gs = intrf->if_gs;
	tf.tf_fs = intrf->if_fs;
	tf.tf_es = intrf->if_es;
	tf.tf_ds = intrf->if_ds;
	tf.tf_edi = intrf->if_edi;
	tf.tf_esi = intrf->if_esi;
	tf.tf_ebp = intrf->if_ebp;
	tf.tf_ebx = intrf->if_ebx;
	tf.tf_ecx = intrf->if_ecx;
	tf.tf_eax = intrf->if_eax;
	tf.tf_trapno = intrf->__if_trapno;
	tf.tf_err = intrf->__if_err;
	tf.tf_eip = intrf->if_eip;
	tf.tf_cs = intrf->if_cs;
	tf.tf_eflags = intrf->if_eflags;
	tf.tf_esp = intrf->if_esp;
	tf.tf_ss = intrf->if_ss;

	ddb_ipi(tf);
#endif
}
#endif /* DDB */

static void
xen_ipi_xcall(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	xc_ipi_handler();
}

static void
xen_ipi_ast(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);

	aston(ci->ci_onproc);
}

static void
xen_ipi_generic(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);
	ipi_cpu_handler();
}

static void
xen_ipi_hvcb(struct cpu_info *ci, struct intrframe *intrf)
{
	KASSERT(ci != NULL);
	KASSERT(intrf != NULL);
	KASSERT(ci == curcpu());
	KASSERT(!ci->ci_vcpu->evtchn_upcall_mask);

	hypervisor_force_callback();
}

static void
xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe * intrf)
{
	softint_trigger(1 << SIR_PREEMPT);
}

#ifdef XENPV
void
xc_send_ipi(struct cpu_info *ci)
{

	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);
	if (ci) {
		if (0 != xen_send_ipi(ci, XEN_IPI_XCALL)) {
			panic("xen_send_ipi(XEN_IPI_XCALL) failed\n");
		}
	} else {
		xen_broadcast_ipi(XEN_IPI_XCALL);
	}
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);
	if (ci) {
		if (0 != xen_send_ipi(ci, XEN_IPI_GENERIC)) {
			panic("xen_send_ipi(XEN_IPI_GENERIC) failed\n");
		}
	} else {
		xen_broadcast_ipi(XEN_IPI_GENERIC);
	}
}
#endif /* XENPV */
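
/*
 * Illustrative sketch only, not part of the driver: one way MI code can
 * reach the XEN_IPI_XCALL path above, assuming the standard xcall(9)
 * interface.  "example_func" is a hypothetical callback; xc_unicast(),
 * xc_wait() and XC_HIGHPRI come from xcall(9).
 *
 * A high-priority cross-call makes xcall(9) call xc_send_ipi() (the
 * XENPV hook defined above), which sets XEN_IPI_XCALL in the target's
 * ci_ipis and notifies its IPI event channel via xen_send_ipi().  The
 * target's event upcall then runs xen_ipi_handler(), which dispatches
 * to xen_ipi_xcall() and finally xc_ipi_handler(), so example_func runs
 * on the remote CPU from a high-priority softint:
 *
 *	static void
 *	example_func(void *arg1, void *arg2)
 *	{
 *		...
 *	}
 *
 *	uint64_t where = xc_unicast(XC_HIGHPRI, example_func,
 *	    NULL, NULL, ci);
 *	xc_wait(where);
 */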