1 /* $NetBSD: vmbus.c,v 1.20 2025/09/06 02:56:07 riastradh Exp $ */ 2 /* $OpenBSD: hyperv.c,v 1.43 2017/06/27 13:56:15 mikeb Exp $ */ 3 4 /*- 5 * Copyright (c) 2009-2012 Microsoft Corp. 6 * Copyright (c) 2012 NetApp Inc. 7 * Copyright (c) 2012 Citrix Inc. 8 * Copyright (c) 2016 Mike Belopuhov <mike (at) esdenera.com> 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice unmodified, this list of conditions, and the following 16 * disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * The OpenBSD port was done under funding by Esdenera Networks GmbH. 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: vmbus.c,v 1.20 2025/09/06 02:56:07 riastradh Exp $"); 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/device.h> 43 #include <sys/atomic.h> 44 #include <sys/bitops.h> 45 #include <sys/bus.h> 46 #include <sys/cpu.h> 47 #include <sys/intr.h> 48 #include <sys/kmem.h> 49 #include <sys/kthread.h> 50 #include <sys/module.h> 51 #include <sys/mutex.h> 52 #include <sys/xcall.h> 53 #include <sys/paravirt_membar.h> 54 55 #include <uvm/uvm_extern.h> 56 57 #include <dev/hyperv/vmbusvar.h> 58 59 #define VMBUS_GPADL_START 0xffff /* 0x10000 effectively */ 60 61 /* Command submission flags */ 62 #define HCF_SLEEPOK 0x0000 63 #define HCF_NOSLEEP 0x0002 /* M_NOWAIT */ 64 #define HCF_NOREPLY 0x0004 65 66 static void vmbus_attach_deferred(device_t); 67 static int vmbus_attach_print(void *, const char *); 68 static int vmbus_alloc_dma(struct vmbus_softc *); 69 static void vmbus_free_dma(struct vmbus_softc *); 70 static int vmbus_init_interrupts(struct vmbus_softc *); 71 static void vmbus_deinit_interrupts(struct vmbus_softc *); 72 static void vmbus_init_interrupts_pcpu(void *, void *); 73 static void vmbus_deinit_interrupts_pcpu(void *, void *); 74 75 static int vmbus_connect(struct vmbus_softc *); 76 static int vmbus_cmd(struct vmbus_softc *, void *, size_t, void *, size_t, 77 int); 78 static int vmbus_start(struct vmbus_softc *, struct vmbus_msg *, paddr_t); 79 static int vmbus_reply(struct vmbus_softc *, struct vmbus_msg *); 80 static uint16_t vmbus_intr_signal(struct vmbus_softc *, paddr_t); 81 static void vmbus_event_proc(void *, struct cpu_info *); 82 static void vmbus_event_proc_compat(void *, struct cpu_info *); 83 static void vmbus_message_proc(void *, struct cpu_info *); 84 static void vmbus_message_softintr(void *); 
85 static void vmbus_channel_response(struct vmbus_softc *, 86 struct vmbus_chanmsg_hdr *); 87 static void vmbus_channel_offer(struct vmbus_softc *, 88 struct vmbus_chanmsg_hdr *); 89 static void vmbus_channel_rescind(struct vmbus_softc *, 90 struct vmbus_chanmsg_hdr *); 91 static void vmbus_channel_delivered(struct vmbus_softc *, 92 struct vmbus_chanmsg_hdr *); 93 static int vmbus_channel_scan(struct vmbus_softc *); 94 static void vmbus_channel_cpu_default(struct vmbus_channel *); 95 static void vmbus_process_offer(struct vmbus_softc *, 96 struct vmbus_chanmsg_choffer *); 97 static void vmbus_process_rescind(struct vmbus_softc *, 98 struct vmbus_chanmsg_chrescind *); 99 static struct vmbus_channel * 100 vmbus_channel_lookup(struct vmbus_softc *, uint32_t); 101 static int vmbus_channel_ring_create(struct vmbus_channel *, uint32_t); 102 static void vmbus_channel_ring_destroy(struct vmbus_channel *); 103 static void vmbus_channel_detach(struct vmbus_channel *); 104 static void vmbus_chevq_enqueue(struct vmbus_softc *, int, void *); 105 static void vmbus_process_chevq(void *); 106 static void vmbus_chevq_thread(void *); 107 static void vmbus_devq_enqueue(struct vmbus_softc *, int, 108 struct vmbus_channel *); 109 static void vmbus_process_devq(void *); 110 static void vmbus_devq_thread(void *); 111 static void vmbus_subchannel_devq_thread(void *); 112 113 static struct vmbus_softc *vmbus_sc; 114 115 static const struct { 116 int hmd_response; 117 int hmd_request; 118 void (*hmd_handler)(struct vmbus_softc *, 119 struct vmbus_chanmsg_hdr *); 120 } vmbus_msg_dispatch[] = { 121 { 0, 0, NULL }, 122 { VMBUS_CHANMSG_CHOFFER, 0, vmbus_channel_offer }, 123 { VMBUS_CHANMSG_CHRESCIND, 0, vmbus_channel_rescind }, 124 { VMBUS_CHANMSG_CHREQUEST, VMBUS_CHANMSG_CHOFFER, NULL }, 125 { VMBUS_CHANMSG_CHOFFER_DONE, 0, vmbus_channel_delivered }, 126 { VMBUS_CHANMSG_CHOPEN, 0, NULL }, 127 { VMBUS_CHANMSG_CHOPEN_RESP, VMBUS_CHANMSG_CHOPEN, 128 vmbus_channel_response }, 129 { VMBUS_CHANMSG_CHCLOSE, 0, NULL }, 130 { VMBUS_CHANMSG_GPADL_CONN, 0, NULL }, 131 { VMBUS_CHANMSG_GPADL_SUBCONN, 0, NULL }, 132 { VMBUS_CHANMSG_GPADL_CONNRESP, VMBUS_CHANMSG_GPADL_CONN, 133 vmbus_channel_response }, 134 { VMBUS_CHANMSG_GPADL_DISCONN, 0, NULL }, 135 { VMBUS_CHANMSG_GPADL_DISCONNRESP, VMBUS_CHANMSG_GPADL_DISCONN, 136 vmbus_channel_response }, 137 { VMBUS_CHANMSG_CHFREE, 0, NULL }, 138 { VMBUS_CHANMSG_CONNECT, 0, NULL }, 139 { VMBUS_CHANMSG_CONNECT_RESP, VMBUS_CHANMSG_CONNECT, 140 vmbus_channel_response }, 141 { VMBUS_CHANMSG_DISCONNECT, 0, NULL }, 142 }; 143 144 const struct hyperv_guid hyperv_guid_network = { 145 { 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, 146 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e } 147 }; 148 149 const struct hyperv_guid hyperv_guid_ide = { 150 { 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 151 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 } 152 }; 153 154 const struct hyperv_guid hyperv_guid_scsi = { 155 { 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, 156 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f } 157 }; 158 159 const struct hyperv_guid hyperv_guid_shutdown = { 160 { 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, 161 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb } 162 }; 163 164 const struct hyperv_guid hyperv_guid_timesync = { 165 { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, 166 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf } 167 }; 168 169 const struct hyperv_guid hyperv_guid_heartbeat = { 170 { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, 171 0xab, 0x55, 0x38, 0x2f, 
0x3b, 0xd5, 0x42, 0x2d } 172 }; 173 174 const struct hyperv_guid hyperv_guid_kvp = { 175 { 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, 176 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 } 177 }; 178 179 const struct hyperv_guid hyperv_guid_vss = { 180 { 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, 181 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 } 182 }; 183 184 const struct hyperv_guid hyperv_guid_dynmem = { 185 { 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46, 186 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 } 187 }; 188 189 const struct hyperv_guid hyperv_guid_mouse = { 190 { 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, 191 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a } 192 }; 193 194 const struct hyperv_guid hyperv_guid_kbd = { 195 { 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48, 196 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 } 197 }; 198 199 const struct hyperv_guid hyperv_guid_video = { 200 { 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, 201 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 } 202 }; 203 204 const struct hyperv_guid hyperv_guid_fc = { 205 { 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a, 206 0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda } 207 }; 208 209 const struct hyperv_guid hyperv_guid_fcopy = { 210 { 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41, 211 0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 } 212 }; 213 214 const struct hyperv_guid hyperv_guid_pcie = { 215 { 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44, 216 0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f } 217 }; 218 219 const struct hyperv_guid hyperv_guid_netdir = { 220 { 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, 221 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 } 222 }; 223 224 const struct hyperv_guid hyperv_guid_rdesktop = { 225 { 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42, 226 0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe } 227 }; 228 229 /* Automatic Virtual Machine Activation (AVMA) Services */ 230 const struct hyperv_guid hyperv_guid_avma1 = { 231 { 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40, 232 0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 } 233 }; 234 235 const struct hyperv_guid hyperv_guid_avma2 = { 236 { 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b, 237 0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b } 238 }; 239 240 const struct hyperv_guid hyperv_guid_avma3 = { 241 { 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11, 242 0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e } 243 }; 244 245 const struct hyperv_guid hyperv_guid_avma4 = { 246 { 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a, 247 0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 } 248 }; 249 250 int 251 vmbus_match(device_t parent, cfdata_t cf, void *aux) 252 { 253 254 if (cf->cf_unit != 0 || 255 !hyperv_hypercall_enabled() || 256 !hyperv_synic_supported()) 257 return 0; 258 259 return 1; 260 } 261 262 int 263 vmbus_attach(struct vmbus_softc *sc) 264 { 265 266 aprint_naive("\n"); 267 aprint_normal(": Hyper-V VMBus\n"); 268 269 vmbus_sc = sc; 270 271 sc->sc_msgpool = pool_cache_init(sizeof(struct vmbus_msg), 8, 0, 0, 272 "hvmsg", NULL, IPL_NET, NULL, NULL, NULL); 273 hyperv_set_message_proc(vmbus_message_proc, sc); 274 275 sc->sc_chanmap = kmem_zalloc(sizeof(struct vmbus_channel *) * 276 VMBUS_CHAN_MAX, KM_SLEEP); 277 278 if (vmbus_alloc_dma(sc)) 279 goto cleanup; 280 281 if (vmbus_init_interrupts(sc)) 282 goto cleanup; 283 284 if (vmbus_connect(sc)) 285 goto cleanup; 286 287 aprint_normal_dev(sc->sc_dev, "protocol %d.%d\n", 288 VMBUS_VERSION_MAJOR(sc->sc_proto), 289 
VMBUS_VERSION_MINOR(sc->sc_proto)); 290 291 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 292 sc->sc_proto == VMBUS_VERSION_WIN7) { 293 hyperv_set_event_proc(vmbus_event_proc_compat, sc); 294 sc->sc_channel_max = VMBUS_CHAN_MAX_COMPAT; 295 } else { 296 hyperv_set_event_proc(vmbus_event_proc, sc); 297 sc->sc_channel_max = VMBUS_CHAN_MAX; 298 } 299 300 if (vmbus_channel_scan(sc)) 301 goto cleanup; 302 303 config_interrupts(sc->sc_dev, vmbus_attach_deferred); 304 305 return 0; 306 307 cleanup: 308 vmbus_deinit_interrupts(sc); 309 vmbus_free_dma(sc); 310 kmem_free(__UNVOLATILE(sc->sc_chanmap), 311 sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX); 312 return -1; 313 } 314 315 static void 316 vmbus_attach_deferred(device_t self) 317 { 318 struct vmbus_softc *sc = device_private(self); 319 uint64_t xc; 320 321 xc = xc_broadcast(0, vmbus_init_interrupts_pcpu, 322 sc, NULL); 323 xc_wait(xc); 324 } 325 326 int 327 vmbus_detach(struct vmbus_softc *sc, int flags) 328 { 329 330 vmbus_deinit_interrupts(sc); 331 vmbus_free_dma(sc); 332 kmem_free(__UNVOLATILE(sc->sc_chanmap), 333 sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX); 334 335 return 0; 336 } 337 338 static int 339 vmbus_alloc_dma(struct vmbus_softc *sc) 340 { 341 CPU_INFO_ITERATOR cii; 342 struct cpu_info *ci; 343 struct vmbus_percpu_data *pd; 344 int i; 345 346 /* 347 * Per-CPU messages and event flags. 348 */ 349 for (CPU_INFO_FOREACH(cii, ci)) { 350 pd = &sc->sc_percpu[cpu_index(ci)]; 351 352 pd->simp = hyperv_dma_alloc(sc->sc_dmat, &pd->simp_dma, 353 PAGE_SIZE, PAGE_SIZE, 0, 1); 354 if (pd->simp == NULL) 355 return ENOMEM; 356 357 pd->siep = hyperv_dma_alloc(sc->sc_dmat, &pd->siep_dma, 358 PAGE_SIZE, PAGE_SIZE, 0, 1); 359 if (pd->siep == NULL) 360 return ENOMEM; 361 } 362 363 sc->sc_events = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_events_dma, 364 PAGE_SIZE, PAGE_SIZE, 0, 1); 365 if (sc->sc_events == NULL) 366 return ENOMEM; 367 sc->sc_wevents = (u_long *)sc->sc_events; 368 sc->sc_revents = (u_long *)((uint8_t *)sc->sc_events + (PAGE_SIZE / 2)); 369 370 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 371 sc->sc_monitor[i] = hyperv_dma_alloc(sc->sc_dmat, 372 &sc->sc_monitor_dma[i], PAGE_SIZE, PAGE_SIZE, 0, 1); 373 if (sc->sc_monitor[i] == NULL) 374 return ENOMEM; 375 } 376 377 return 0; 378 } 379 380 static void 381 vmbus_free_dma(struct vmbus_softc *sc) 382 { 383 CPU_INFO_ITERATOR cii; 384 struct cpu_info *ci; 385 int i; 386 387 if (sc->sc_events != NULL) { 388 sc->sc_events = sc->sc_wevents = sc->sc_revents = NULL; 389 hyperv_dma_free(sc->sc_dmat, &sc->sc_events_dma); 390 } 391 392 for (i = 0; i < __arraycount(sc->sc_monitor); i++) { 393 sc->sc_monitor[i] = NULL; 394 hyperv_dma_free(sc->sc_dmat, &sc->sc_monitor_dma[i]); 395 } 396 397 for (CPU_INFO_FOREACH(cii, ci)) { 398 struct vmbus_percpu_data *pd = &sc->sc_percpu[cpu_index(ci)]; 399 400 if (pd->simp != NULL) { 401 pd->simp = NULL; 402 hyperv_dma_free(sc->sc_dmat, &pd->simp_dma); 403 } 404 if (pd->siep != NULL) { 405 pd->siep = NULL; 406 hyperv_dma_free(sc->sc_dmat, &pd->siep_dma); 407 } 408 } 409 } 410 411 static int 412 vmbus_init_interrupts(struct vmbus_softc *sc) 413 { 414 uint64_t xc; 415 416 TAILQ_INIT(&sc->sc_reqs); 417 mutex_init(&sc->sc_req_lock, MUTEX_DEFAULT, IPL_NET); 418 419 TAILQ_INIT(&sc->sc_rsps); 420 mutex_init(&sc->sc_rsp_lock, MUTEX_DEFAULT, IPL_NET); 421 422 sc->sc_proto = VMBUS_VERSION_WS2008; 423 424 /* XXX event_tq */ 425 426 sc->sc_msg_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 427 vmbus_message_softintr, sc); 428 if (sc->sc_msg_sih == NULL) 429 return 
-1; 430 431 kcpuset_create(&sc->sc_intr_cpuset, true); 432 if (cold) { 433 /* Initialize other CPUs later. */ 434 vmbus_init_interrupts_pcpu(sc, NULL); 435 } else { 436 xc = xc_broadcast(0, vmbus_init_interrupts_pcpu, 437 sc, NULL); 438 xc_wait(xc); 439 } 440 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_SYNIC); 441 442 return 0; 443 } 444 445 static void 446 vmbus_deinit_interrupts(struct vmbus_softc *sc) 447 { 448 uint64_t xc; 449 450 if (cold) { 451 vmbus_deinit_interrupts_pcpu(sc, NULL); 452 } else { 453 xc = xc_broadcast(0, vmbus_deinit_interrupts_pcpu, 454 sc, NULL); 455 xc_wait(xc); 456 } 457 atomic_and_32(&sc->sc_flags, (uint32_t)~VMBUS_SCFLAG_SYNIC); 458 459 /* XXX event_tq */ 460 461 if (sc->sc_msg_sih != NULL) { 462 softint_disestablish(sc->sc_msg_sih); 463 sc->sc_msg_sih = NULL; 464 } 465 } 466 467 static void 468 vmbus_init_interrupts_pcpu(void *arg1, void *arg2 __unused) 469 { 470 struct vmbus_softc *sc = arg1; 471 cpuid_t cpu; 472 int s; 473 474 s = splhigh(); 475 476 cpu = cpu_index(curcpu()); 477 if (!kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 478 kcpuset_atomic_set(sc->sc_intr_cpuset, cpu); 479 vmbus_init_interrupts_md(sc, cpu); 480 vmbus_init_synic_md(sc, cpu); 481 } 482 483 splx(s); 484 } 485 486 static void 487 vmbus_deinit_interrupts_pcpu(void *arg1, void *arg2 __unused) 488 { 489 struct vmbus_softc *sc = arg1; 490 cpuid_t cpu; 491 int s; 492 493 s = splhigh(); 494 495 cpu = cpu_index(curcpu()); 496 if (kcpuset_isset(sc->sc_intr_cpuset, cpu)) { 497 if (ISSET(sc->sc_flags, VMBUS_SCFLAG_SYNIC)) 498 vmbus_deinit_synic_md(sc, cpu); 499 vmbus_deinit_interrupts_md(sc, cpu); 500 kcpuset_atomic_clear(sc->sc_intr_cpuset, cpu); 501 } 502 503 splx(s); 504 } 505 506 static int 507 vmbus_connect(struct vmbus_softc *sc) 508 { 509 static const uint32_t versions[] = { 510 VMBUS_VERSION_WIN8_1, 511 VMBUS_VERSION_WIN8, 512 VMBUS_VERSION_WIN7, 513 VMBUS_VERSION_WS2008 514 }; 515 struct vmbus_chanmsg_connect cmd; 516 struct vmbus_chanmsg_connect_resp rsp; 517 int i, rv; 518 519 memset(&cmd, 0, sizeof(cmd)); 520 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT; 521 cmd.chm_evtflags = hyperv_dma_get_paddr(&sc->sc_events_dma); 522 cmd.chm_mnf1 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[0]); 523 cmd.chm_mnf2 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[1]); 524 525 memset(&rsp, 0, sizeof(rsp)); 526 527 for (i = 0; i < __arraycount(versions); i++) { 528 cmd.chm_ver = versions[i]; 529 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), 530 HCF_NOSLEEP); 531 if (rv) { 532 DPRINTF("%s: CONNECT failed\n", 533 device_xname(sc->sc_dev)); 534 return rv; 535 } 536 if (rsp.chm_done) { 537 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_CONNECTED); 538 sc->sc_proto = versions[i]; 539 sc->sc_handle = VMBUS_GPADL_START; 540 break; 541 } 542 } 543 if (i == __arraycount(versions)) { 544 device_printf(sc->sc_dev, 545 "failed to negotiate protocol version\n"); 546 return ENXIO; 547 } 548 549 return 0; 550 } 551 552 static int 553 vmbus_cmd(struct vmbus_softc *sc, void *cmd, size_t cmdlen, void *rsp, 554 size_t rsplen, int flags) 555 { 556 struct vmbus_msg *msg; 557 paddr_t pa; 558 int rv; 559 560 if (cmdlen > VMBUS_MSG_DSIZE_MAX) { 561 device_printf(sc->sc_dev, "payload too large (%zu)\n", 562 cmdlen); 563 return EMSGSIZE; 564 } 565 566 msg = pool_cache_get_paddr(sc->sc_msgpool, PR_WAITOK, &pa); 567 if (msg == NULL) { 568 device_printf(sc->sc_dev, "couldn't get msgpool\n"); 569 return ENOMEM; 570 } 571 memset(msg, 0, sizeof(*msg)); 572 msg->msg_req.hc_dsize = cmdlen; 573 memcpy(msg->msg_req.hc_data, cmd, cmdlen); 574 575 
if (!(flags & HCF_NOREPLY)) { 576 msg->msg_rsp = rsp; 577 msg->msg_rsplen = rsplen; 578 } else 579 msg->msg_flags |= MSGF_NOQUEUE; 580 581 if (flags & HCF_NOSLEEP) 582 msg->msg_flags |= MSGF_NOSLEEP; 583 584 rv = vmbus_start(sc, msg, pa); 585 if (rv == 0) 586 rv = vmbus_reply(sc, msg); 587 pool_cache_put_paddr(sc->sc_msgpool, msg, pa); 588 return rv; 589 } 590 591 static int 592 vmbus_start(struct vmbus_softc *sc, struct vmbus_msg *msg, paddr_t msg_pa) 593 { 594 const char *wchan = "hvstart"; 595 uint16_t status; 596 int wait_ms = 1; /* milliseconds */ 597 int i, s; 598 599 msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE; 600 msg->msg_req.hc_msgtype = 1; 601 602 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 603 mutex_enter(&sc->sc_req_lock); 604 TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry); 605 mutex_exit(&sc->sc_req_lock); 606 } 607 608 /* 609 * In order to cope with transient failures, e.g. insufficient 610 * resources on host side, we retry the post message Hypercall 611 * several times. 20 retries seem sufficient. 612 */ 613 #define HC_RETRY_MAX 20 614 #define HC_WAIT_MAX (2 * 1000) /* 2s */ 615 616 for (i = 0; i < HC_RETRY_MAX; i++) { 617 status = hyperv_hypercall_post_message( 618 msg_pa + offsetof(struct vmbus_msg, msg_req)); 619 if (status == HYPERCALL_STATUS_SUCCESS) 620 return 0; 621 622 if (msg->msg_flags & MSGF_NOSLEEP) { 623 DELAY(wait_ms * 1000); 624 s = splnet(); 625 hyperv_intr(); 626 splx(s); 627 } else 628 tsleep(wchan, PRIBIO, wchan, uimax(1, mstohz(wait_ms))); 629 630 if (wait_ms < HC_WAIT_MAX) 631 wait_ms *= 2; 632 } 633 634 #undef HC_RETRY_MAX 635 #undef HC_WAIT_MAX 636 637 device_printf(sc->sc_dev, 638 "posting vmbus message failed with %d\n", status); 639 640 if (!(msg->msg_flags & MSGF_NOQUEUE)) { 641 mutex_enter(&sc->sc_req_lock); 642 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 643 mutex_exit(&sc->sc_req_lock); 644 } 645 646 return EIO; 647 } 648 649 static int 650 vmbus_reply_done(struct vmbus_softc *sc, struct vmbus_msg *msg) 651 { 652 struct vmbus_msg *m; 653 654 mutex_enter(&sc->sc_rsp_lock); 655 TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) { 656 if (m == msg) { 657 mutex_exit(&sc->sc_rsp_lock); 658 return 1; 659 } 660 } 661 mutex_exit(&sc->sc_rsp_lock); 662 return 0; 663 } 664 665 static int 666 vmbus_reply(struct vmbus_softc *sc, struct vmbus_msg *msg) 667 { 668 int s; 669 670 if (msg->msg_flags & MSGF_NOQUEUE) 671 return 0; 672 673 while (!vmbus_reply_done(sc, msg)) { 674 if (msg->msg_flags & MSGF_NOSLEEP) { 675 delay(1000); 676 s = splnet(); 677 hyperv_intr(); 678 splx(s); 679 } else 680 tsleep(msg, PRIBIO, "hvreply", uimax(1, mstohz(1))); 681 } 682 683 mutex_enter(&sc->sc_rsp_lock); 684 TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry); 685 mutex_exit(&sc->sc_rsp_lock); 686 687 return 0; 688 } 689 690 static uint16_t 691 vmbus_intr_signal(struct vmbus_softc *sc, paddr_t con_pa) 692 { 693 uint64_t status; 694 695 status = hyperv_hypercall_signal_event(con_pa); 696 return (uint16_t)status; 697 } 698 699 #if LONG_BIT == 64 700 #define ffsl(v) ffs64(v) 701 #elif LONG_BIT == 32 702 #define ffsl(v) ffs32(v) 703 #else 704 #error unsupport LONG_BIT 705 #endif /* LONG_BIT */ 706 707 static void 708 vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *revents, 709 int maxrow) 710 { 711 struct vmbus_channel *ch; 712 u_long pending; 713 uint32_t chanid, chanid_base; 714 int row, chanid_ofs; 715 716 for (row = 0; row < maxrow; row++) { 717 if (revents[row] == 0) 718 continue; 719 720 pending = atomic_swap_ulong(&revents[row], 0); 721 pending &= ~sc->sc_evtmask[row]; 722 
chanid_base = row * VMBUS_EVTFLAG_LEN; 723 724 while ((chanid_ofs = ffsl(pending)) != 0) { 725 chanid_ofs--; /* NOTE: ffs is 1-based */ 726 pending &= ~(1UL << chanid_ofs); 727 728 chanid = chanid_base + chanid_ofs; 729 /* vmbus channel protocol message */ 730 if (chanid == 0) 731 continue; 732 733 ch = sc->sc_chanmap[chanid]; 734 if (__predict_false(ch == NULL)) { 735 /* Channel is closed. */ 736 continue; 737 } 738 __insn_barrier(); 739 if (ch->ch_state != VMBUS_CHANSTATE_OPENED) { 740 device_printf(sc->sc_dev, 741 "channel %d is not active\n", chanid); 742 continue; 743 } 744 ch->ch_evcnt.ev_count++; 745 vmbus_channel_schedule(ch); 746 } 747 } 748 } 749 750 static void 751 vmbus_event_proc(void *arg, struct cpu_info *ci) 752 { 753 struct vmbus_softc *sc = arg; 754 struct vmbus_evtflags *evt; 755 756 /* 757 * On Host with Win8 or above, the event page can be 758 * checked directly to get the id of the channel 759 * that has the pending interrupt. 760 */ 761 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 762 VMBUS_SINT_MESSAGE; 763 764 vmbus_event_flags_proc(sc, evt->evt_flags, 765 __arraycount(evt->evt_flags)); 766 } 767 768 static void 769 vmbus_event_proc_compat(void *arg, struct cpu_info *ci) 770 { 771 struct vmbus_softc *sc = arg; 772 struct vmbus_evtflags *evt; 773 774 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep + 775 VMBUS_SINT_MESSAGE; 776 777 if (test_bit(0, &evt->evt_flags[0])) { 778 clear_bit(0, &evt->evt_flags[0]); 779 /* 780 * receive size is 1/2 page and divide that by 4 bytes 781 */ 782 vmbus_event_flags_proc(sc, sc->sc_revents, 783 VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN); 784 } 785 } 786 787 static void 788 vmbus_message_proc(void *arg, struct cpu_info *ci) 789 { 790 struct vmbus_softc *sc = arg; 791 struct vmbus_message *msg; 792 793 msg = (struct vmbus_message *)sc->sc_percpu[cpu_index(ci)].simp + 794 VMBUS_SINT_MESSAGE; 795 /* XXX bus_dmamap_sync(POSTREAD|POSTWRITE) on msg_type */ 796 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { 797 if (__predict_true(!cold)) 798 softint_schedule_cpu(sc->sc_msg_sih, ci); 799 else 800 vmbus_message_softintr(sc); 801 } 802 } 803 804 static void 805 vmbus_message_softintr(void *arg) 806 { 807 struct vmbus_softc *sc = arg; 808 struct vmbus_message *msg; 809 struct vmbus_chanmsg_hdr *hdr; 810 uint32_t type; 811 cpuid_t cpu; 812 813 cpu = cpu_index(curcpu()); 814 815 for (;;) { 816 msg = (struct vmbus_message *)sc->sc_percpu[cpu].simp + 817 VMBUS_SINT_MESSAGE; 818 /* XXX bus_dmamap_sync(POSTREAD|POSTWRITE) on msg_type */ 819 if (msg->msg_type == HYPERV_MSGTYPE_NONE) 820 break; 821 822 /* XXX bus_dmamap_sync(POSTREAD) on msg_data */ 823 824 hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data; 825 type = hdr->chm_type; 826 if (type >= VMBUS_CHANMSG_COUNT) { 827 device_printf(sc->sc_dev, 828 "unhandled message type %u flags %#x\n", type, 829 msg->msg_flags); 830 } else { 831 if (vmbus_msg_dispatch[type].hmd_handler) { 832 vmbus_msg_dispatch[type].hmd_handler(sc, hdr); 833 } else { 834 device_printf(sc->sc_dev, 835 "unhandled message type %u\n", type); 836 } 837 } 838 839 /* XXX bus_dmamap_sync(PREREAD) on msg_data */ 840 841 msg->msg_type = HYPERV_MSGTYPE_NONE; 842 /* XXX bus_dmamap_sync(PREWRITE|PREREAD) on msg_type */ 843 844 /* 845 * Ensure we tell the host that this message is done 846 * before we check whether the host told us there are 847 * more pending. 
848 */ 849 paravirt_membar_sync(); 850 851 /* XXX bus_dmamap_sync(POSTREAD) on msg_flags */ 852 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) 853 hyperv_send_eom(); 854 /* XXX bus_dmamap_sync(PREREAD) on msg_flags */ 855 } 856 } 857 858 static void 859 vmbus_channel_response(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *rsphdr) 860 { 861 struct vmbus_msg *msg; 862 struct vmbus_chanmsg_hdr *reqhdr; 863 int req; 864 865 req = vmbus_msg_dispatch[rsphdr->chm_type].hmd_request; 866 mutex_enter(&sc->sc_req_lock); 867 TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) { 868 reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data; 869 if (reqhdr->chm_type == req) { 870 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry); 871 break; 872 } 873 } 874 mutex_exit(&sc->sc_req_lock); 875 if (msg != NULL) { 876 memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen); 877 mutex_enter(&sc->sc_rsp_lock); 878 TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry); 879 mutex_exit(&sc->sc_rsp_lock); 880 wakeup(msg); 881 } 882 } 883 884 static void 885 vmbus_channel_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 886 { 887 struct vmbus_chanmsg_choffer *co; 888 889 co = kmem_intr_alloc(sizeof(*co), KM_NOSLEEP); 890 if (co == NULL) { 891 device_printf(sc->sc_dev, 892 "failed to allocate an offer object\n"); 893 return; 894 } 895 896 memcpy(co, hdr, sizeof(*co)); 897 vmbus_chevq_enqueue(sc, VMBUS_CHEV_TYPE_OFFER, co); 898 } 899 900 static void 901 vmbus_channel_rescind(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 902 { 903 struct vmbus_chanmsg_chrescind *cr; 904 905 cr = kmem_intr_alloc(sizeof(*cr), KM_NOSLEEP); 906 if (cr == NULL) { 907 device_printf(sc->sc_dev, 908 "failed to allocate an rescind object\n"); 909 return; 910 } 911 912 memcpy(cr, hdr, sizeof(*cr)); 913 vmbus_chevq_enqueue(sc, VMBUS_CHEV_TYPE_RESCIND, cr); 914 } 915 916 static void 917 vmbus_channel_delivered(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr) 918 { 919 920 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED); 921 wakeup(&sc->sc_devq); 922 } 923 924 static void 925 hyperv_guid_sprint(struct hyperv_guid *guid, char *str, size_t size) 926 { 927 static const struct { 928 const struct hyperv_guid *guid; 929 const char *ident; 930 } map[] = { 931 { &hyperv_guid_network, "network" }, 932 { &hyperv_guid_ide, "ide" }, 933 { &hyperv_guid_scsi, "scsi" }, 934 { &hyperv_guid_shutdown, "shutdown" }, 935 { &hyperv_guid_timesync, "timesync" }, 936 { &hyperv_guid_heartbeat, "heartbeat" }, 937 { &hyperv_guid_kvp, "kvp" }, 938 { &hyperv_guid_vss, "vss" }, 939 { &hyperv_guid_dynmem, "dynamic-memory" }, 940 { &hyperv_guid_mouse, "mouse" }, 941 { &hyperv_guid_kbd, "keyboard" }, 942 { &hyperv_guid_video, "video" }, 943 { &hyperv_guid_fc, "fiber-channel" }, 944 { &hyperv_guid_fcopy, "file-copy" }, 945 { &hyperv_guid_pcie, "pcie-passthrough" }, 946 { &hyperv_guid_netdir, "network-direct" }, 947 { &hyperv_guid_rdesktop, "remote-desktop" }, 948 { &hyperv_guid_avma1, "avma-1" }, 949 { &hyperv_guid_avma2, "avma-2" }, 950 { &hyperv_guid_avma3, "avma-3" }, 951 { &hyperv_guid_avma4, "avma-4" }, 952 }; 953 int i; 954 955 for (i = 0; i < __arraycount(map); i++) { 956 if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) { 957 strlcpy(str, map[i].ident, size); 958 return; 959 } 960 } 961 hyperv_guid2str(guid, str, size); 962 } 963 964 static int 965 vmbus_channel_scan(struct vmbus_softc *sc) 966 { 967 struct vmbus_chanmsg_hdr hdr; 968 struct vmbus_chanmsg_choffer rsp; 969 970 TAILQ_INIT(&sc->sc_prichans); 971 mutex_init(&sc->sc_prichan_lock, MUTEX_DEFAULT, IPL_NET); 
972 TAILQ_INIT(&sc->sc_channels); 973 mutex_init(&sc->sc_channel_lock, MUTEX_DEFAULT, IPL_NET); 974 975 /* 976 * This queue serializes vmbus channel offer and rescind messages. 977 */ 978 SIMPLEQ_INIT(&sc->sc_chevq); 979 mutex_init(&sc->sc_chevq_lock, MUTEX_DEFAULT, IPL_NET); 980 cv_init(&sc->sc_chevq_cv, "hvchevcv"); 981 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 982 vmbus_chevq_thread, sc, NULL, "hvchevq") != 0) { 983 DPRINTF("%s: failed to create prich chevq thread\n", 984 device_xname(sc->sc_dev)); 985 return -1; 986 } 987 988 /* 989 * This queue serializes vmbus devices' attach and detach 990 * for channel offer and rescind messages. 991 */ 992 SIMPLEQ_INIT(&sc->sc_devq); 993 mutex_init(&sc->sc_devq_lock, MUTEX_DEFAULT, IPL_NET); 994 cv_init(&sc->sc_devq_cv, "hvdevqcv"); 995 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 996 vmbus_devq_thread, sc, NULL, "hvdevq") != 0) { 997 DPRINTF("%s: failed to create prich devq thread\n", 998 device_xname(sc->sc_dev)); 999 return -1; 1000 } 1001 1002 /* 1003 * This queue handles sub-channel detach, so that vmbus 1004 * device's detach running in sc_devq can drain its sub-channels. 1005 */ 1006 SIMPLEQ_INIT(&sc->sc_subch_devq); 1007 mutex_init(&sc->sc_subch_devq_lock, MUTEX_DEFAULT, IPL_NET); 1008 cv_init(&sc->sc_subch_devq_cv, "hvsdvqcv"); 1009 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, 1010 vmbus_subchannel_devq_thread, sc, NULL, "hvsdevq") != 0) { 1011 DPRINTF("%s: failed to create subch devq thread\n", 1012 device_xname(sc->sc_dev)); 1013 return -1; 1014 } 1015 1016 memset(&hdr, 0, sizeof(hdr)); 1017 hdr.chm_type = VMBUS_CHANMSG_CHREQUEST; 1018 1019 if (vmbus_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp), 1020 HCF_NOREPLY | HCF_NOSLEEP)) { 1021 DPRINTF("%s: CHREQUEST failed\n", device_xname(sc->sc_dev)); 1022 return -1; 1023 } 1024 1025 while (!ISSET(sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED)) 1026 tsleep(&sc->sc_devq, PRIBIO, "hvscan", 1); 1027 1028 mutex_enter(&sc->sc_chevq_lock); 1029 vmbus_process_chevq(sc); 1030 mutex_exit(&sc->sc_chevq_lock); 1031 mutex_enter(&sc->sc_devq_lock); 1032 vmbus_process_devq(sc); 1033 mutex_exit(&sc->sc_devq_lock); 1034 1035 return 0; 1036 } 1037 1038 static struct vmbus_channel * 1039 vmbus_channel_alloc(struct vmbus_softc *sc) 1040 { 1041 struct vmbus_channel *ch; 1042 1043 ch = kmem_zalloc(sizeof(*ch), KM_SLEEP); 1044 1045 ch->ch_monprm = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_monprm_dma, 1046 sizeof(*ch->ch_monprm), 8, 0, 1); 1047 if (ch->ch_monprm == NULL) { 1048 device_printf(sc->sc_dev, "monprm alloc failed\n"); 1049 kmem_free(ch, sizeof(*ch)); 1050 return NULL; 1051 } 1052 1053 ch->ch_refs = 1; 1054 ch->ch_sc = sc; 1055 mutex_init(&ch->ch_event_lock, MUTEX_DEFAULT, IPL_NET); 1056 cv_init(&ch->ch_event_cv, "hvevwait"); 1057 mutex_init(&ch->ch_subchannel_lock, MUTEX_DEFAULT, IPL_NET); 1058 cv_init(&ch->ch_subchannel_cv, "hvsubch"); 1059 TAILQ_INIT(&ch->ch_subchannels); 1060 1061 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 1062 1063 return ch; 1064 } 1065 1066 static void 1067 vmbus_channel_free(struct vmbus_channel *ch) 1068 { 1069 struct vmbus_softc *sc = ch->ch_sc; 1070 1071 KASSERTMSG(TAILQ_EMPTY(&ch->ch_subchannels) && 1072 ch->ch_subchannel_count == 0, "still owns sub-channels"); 1073 KASSERTMSG(ch->ch_state == 0 || ch->ch_state == VMBUS_CHANSTATE_CLOSED, 1074 "free busy channel"); 1075 KASSERTMSG(ch->ch_refs == 0, "channel %u: invalid refcnt %d", 1076 ch->ch_id, ch->ch_refs); 1077 1078 hyperv_dma_free(sc->sc_dmat, &ch->ch_monprm_dma); 1079 mutex_destroy(&ch->ch_event_lock); 1080 
cv_destroy(&ch->ch_event_cv); 1081 mutex_destroy(&ch->ch_subchannel_lock); 1082 cv_destroy(&ch->ch_subchannel_cv); 1083 /* XXX ch_evcnt */ 1084 if (ch->ch_taskq != NULL) 1085 softint_disestablish(ch->ch_taskq); 1086 kmem_free(ch, sizeof(*ch)); 1087 } 1088 1089 static int 1090 vmbus_channel_add(struct vmbus_channel *nch) 1091 { 1092 struct vmbus_softc *sc = nch->ch_sc; 1093 struct vmbus_channel *ch; 1094 int refs __diagused; 1095 1096 if (nch->ch_id == 0) { 1097 device_printf(sc->sc_dev, "got channel 0 offer, discard\n"); 1098 return EINVAL; 1099 } else if (nch->ch_id >= sc->sc_channel_max) { 1100 device_printf(sc->sc_dev, "invalid channel %u offer\n", 1101 nch->ch_id); 1102 return EINVAL; 1103 } 1104 1105 mutex_enter(&sc->sc_prichan_lock); 1106 TAILQ_FOREACH(ch, &sc->sc_prichans, ch_prientry) { 1107 if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) && 1108 !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst))) 1109 break; 1110 } 1111 if (VMBUS_CHAN_ISPRIMARY(nch)) { 1112 if (ch == NULL) { 1113 TAILQ_INSERT_TAIL(&sc->sc_prichans, nch, ch_prientry); 1114 mutex_exit(&sc->sc_prichan_lock); 1115 goto done; 1116 } else { 1117 mutex_exit(&sc->sc_prichan_lock); 1118 device_printf(sc->sc_dev, 1119 "duplicated primary channel%u\n", nch->ch_id); 1120 return EINVAL; 1121 } 1122 } else { 1123 if (ch == NULL) { 1124 mutex_exit(&sc->sc_prichan_lock); 1125 device_printf(sc->sc_dev, "no primary channel%u\n", 1126 nch->ch_id); 1127 return EINVAL; 1128 } 1129 } 1130 mutex_exit(&sc->sc_prichan_lock); 1131 1132 KASSERT(!VMBUS_CHAN_ISPRIMARY(nch)); 1133 KASSERT(ch != NULL); 1134 1135 refs = atomic_inc_uint_nv(&nch->ch_refs); 1136 KASSERT(refs == 2); 1137 1138 nch->ch_primary_channel = ch; 1139 nch->ch_dev = ch->ch_dev; 1140 1141 mutex_enter(&ch->ch_subchannel_lock); 1142 TAILQ_INSERT_TAIL(&ch->ch_subchannels, nch, ch_subentry); 1143 ch->ch_subchannel_count++; 1144 cv_signal(&ch->ch_subchannel_cv); 1145 mutex_exit(&ch->ch_subchannel_lock); 1146 1147 done: 1148 mutex_enter(&sc->sc_channel_lock); 1149 TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry); 1150 mutex_exit(&sc->sc_channel_lock); 1151 1152 vmbus_channel_cpu_default(nch); 1153 1154 return 0; 1155 } 1156 1157 void 1158 vmbus_channel_cpu_set(struct vmbus_channel *ch, int cpu) 1159 { 1160 struct vmbus_softc *sc = ch->ch_sc; 1161 1162 KASSERTMSG(cpu >= 0 && cpu < ncpu, "invalid cpu %d", cpu); 1163 1164 if (sc->sc_proto == VMBUS_VERSION_WS2008 || 1165 sc->sc_proto == VMBUS_VERSION_WIN7) { 1166 /* Only cpu0 is supported */ 1167 cpu = 0; 1168 } 1169 1170 ch->ch_cpuid = cpu; 1171 ch->ch_vcpu = hyperv_get_vcpuid(cpu); 1172 1173 aprint_debug_dev(ch->ch_dev != NULL ? ch->ch_dev : sc->sc_dev, 1174 "channel %u assigned to cpu%u [vcpu%u]\n", 1175 ch->ch_id, ch->ch_cpuid, ch->ch_vcpu); 1176 } 1177 1178 void 1179 vmbus_channel_cpu_rr(struct vmbus_channel *ch) 1180 { 1181 static uint32_t vmbus_channel_nextcpu; 1182 int cpu; 1183 1184 cpu = atomic_inc_32_nv(&vmbus_channel_nextcpu) % ncpu; 1185 vmbus_channel_cpu_set(ch, cpu); 1186 } 1187 1188 static void 1189 vmbus_channel_cpu_default(struct vmbus_channel *ch) 1190 { 1191 1192 /* 1193 * By default, pin the channel to cpu0. Devices having 1194 * special channel-cpu mapping requirement should call 1195 * vmbus_channel_cpu_{set,rr}(). 1196 */ 1197 vmbus_channel_cpu_set(ch, 0); 1198 } 1199 1200 bool 1201 vmbus_channel_is_revoked(struct vmbus_channel *ch) 1202 { 1203 1204 return (ch->ch_flags & CHF_REVOKED) ? 
true : false; 1205 } 1206 1207 static void 1208 vmbus_process_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_choffer *co) 1209 { 1210 struct vmbus_channel *ch; 1211 1212 ch = vmbus_channel_alloc(sc); 1213 if (ch == NULL) { 1214 device_printf(sc->sc_dev, "allocate channel %u failed\n", 1215 co->chm_chanid); 1216 return; 1217 } 1218 1219 /* 1220 * By default we setup state to enable batched reading. 1221 * A specific service can choose to disable this prior 1222 * to opening the channel. 1223 */ 1224 ch->ch_flags |= CHF_BATCHED; 1225 1226 hyperv_guid_sprint(&co->chm_chtype, ch->ch_ident, 1227 sizeof(ch->ch_ident)); 1228 1229 ch->ch_monprm->mp_connid = VMBUS_CONNID_EVENT; 1230 if (sc->sc_proto > VMBUS_VERSION_WS2008) 1231 ch->ch_monprm->mp_connid = co->chm_connid; 1232 1233 if (co->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) { 1234 ch->ch_mgroup = co->chm_montrig / VMBUS_MONTRIG_LEN; 1235 ch->ch_mindex = co->chm_montrig % VMBUS_MONTRIG_LEN; 1236 ch->ch_flags |= CHF_MONITOR; 1237 } 1238 1239 ch->ch_id = co->chm_chanid; 1240 ch->ch_subidx = co->chm_subidx; 1241 1242 memcpy(&ch->ch_type, &co->chm_chtype, sizeof(ch->ch_type)); 1243 memcpy(&ch->ch_inst, &co->chm_chinst, sizeof(ch->ch_inst)); 1244 1245 if (vmbus_channel_add(ch) != 0) { 1246 atomic_dec_uint(&ch->ch_refs); 1247 vmbus_channel_free(ch); 1248 return; 1249 } 1250 1251 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1252 1253 vmbus_devq_enqueue(sc, VMBUS_DEV_TYPE_ATTACH, ch); 1254 1255 #ifdef HYPERV_DEBUG 1256 printf("%s: channel %u: \"%s\"", device_xname(sc->sc_dev), ch->ch_id, 1257 ch->ch_ident); 1258 if (ch->ch_flags & CHF_MONITOR) 1259 printf(", monitor %u\n", co->chm_montrig); 1260 else 1261 printf("\n"); 1262 #endif 1263 } 1264 1265 static void 1266 vmbus_process_rescind(struct vmbus_softc *sc, 1267 struct vmbus_chanmsg_chrescind *cr) 1268 { 1269 struct vmbus_channel *ch; 1270 1271 if (cr->chm_chanid > VMBUS_CHAN_MAX) { 1272 device_printf(sc->sc_dev, "invalid revoked channel%u\n", 1273 cr->chm_chanid); 1274 return; 1275 } 1276 1277 mutex_enter(&sc->sc_channel_lock); 1278 ch = vmbus_channel_lookup(sc, cr->chm_chanid); 1279 if (ch == NULL) { 1280 mutex_exit(&sc->sc_channel_lock); 1281 device_printf(sc->sc_dev, "channel%u is not offered\n", 1282 cr->chm_chanid); 1283 return; 1284 } 1285 TAILQ_REMOVE(&sc->sc_channels, ch, ch_entry); 1286 mutex_exit(&sc->sc_channel_lock); 1287 1288 if (VMBUS_CHAN_ISPRIMARY(ch)) { 1289 mutex_enter(&sc->sc_prichan_lock); 1290 TAILQ_REMOVE(&sc->sc_prichans, ch, ch_prientry); 1291 mutex_exit(&sc->sc_prichan_lock); 1292 } 1293 1294 KASSERTMSG(!(ch->ch_flags & CHF_REVOKED), 1295 "channel%u has already been revoked", ch->ch_id); 1296 atomic_or_uint(&ch->ch_flags, CHF_REVOKED); 1297 1298 vmbus_channel_detach(ch); 1299 } 1300 1301 static int 1302 vmbus_channel_release(struct vmbus_channel *ch) 1303 { 1304 struct vmbus_softc *sc = ch->ch_sc; 1305 struct vmbus_chanmsg_chfree cmd; 1306 int rv; 1307 1308 memset(&cmd, 0, sizeof(cmd)); 1309 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHFREE; 1310 cmd.chm_chanid = ch->ch_id; 1311 1312 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1313 HCF_NOREPLY | HCF_SLEEPOK); 1314 if (rv) { 1315 DPRINTF("%s: CHFREE failed with %d\n", device_xname(sc->sc_dev), 1316 rv); 1317 } 1318 return rv; 1319 } 1320 1321 struct vmbus_channel ** 1322 vmbus_subchannel_get(struct vmbus_channel *prich, int subchan_cnt) 1323 { 1324 struct vmbus_softc *sc = prich->ch_sc; 1325 struct vmbus_channel **ret, *ch; 1326 int i, s; 1327 1328 KASSERTMSG(subchan_cnt > 0, 1329 "invalid sub-channel count %d", subchan_cnt); 1330 
1331 ret = kmem_zalloc(sizeof(struct vmbus_channel *) * subchan_cnt, 1332 KM_SLEEP); 1333 1334 mutex_enter(&prich->ch_subchannel_lock); 1335 1336 while (prich->ch_subchannel_count < subchan_cnt) { 1337 if (cold) { 1338 mutex_exit(&prich->ch_subchannel_lock); 1339 delay(1000); 1340 s = splnet(); 1341 hyperv_intr(); 1342 splx(s); 1343 mutex_enter(&sc->sc_chevq_lock); 1344 vmbus_process_chevq(sc); 1345 mutex_exit(&sc->sc_chevq_lock); 1346 mutex_enter(&prich->ch_subchannel_lock); 1347 } else { 1348 cv_wait(&prich->ch_subchannel_cv, 1349 &prich->ch_subchannel_lock); 1350 } 1351 } 1352 1353 i = 0; 1354 TAILQ_FOREACH(ch, &prich->ch_subchannels, ch_subentry) { 1355 ret[i] = ch; /* XXX inc refs */ 1356 1357 if (++i == subchan_cnt) 1358 break; 1359 } 1360 1361 KASSERTMSG(i == subchan_cnt, "invalid subchan count %d, should be %d", 1362 prich->ch_subchannel_count, subchan_cnt); 1363 1364 mutex_exit(&prich->ch_subchannel_lock); 1365 1366 return ret; 1367 } 1368 1369 void 1370 vmbus_subchannel_rel(struct vmbus_channel **subch, int cnt) 1371 { 1372 1373 kmem_free(subch, sizeof(struct vmbus_channel *) * cnt); 1374 } 1375 1376 void 1377 vmbus_subchannel_drain(struct vmbus_channel *prich) 1378 { 1379 int s; 1380 1381 mutex_enter(&prich->ch_subchannel_lock); 1382 while (prich->ch_subchannel_count > 0) { 1383 if (cold) { 1384 mutex_exit(&prich->ch_subchannel_lock); 1385 delay(1000); 1386 s = splnet(); 1387 hyperv_intr(); 1388 splx(s); 1389 mutex_enter(&prich->ch_subchannel_lock); 1390 } else { 1391 cv_wait(&prich->ch_subchannel_cv, 1392 &prich->ch_subchannel_lock); 1393 } 1394 } 1395 mutex_exit(&prich->ch_subchannel_lock); 1396 } 1397 1398 static struct vmbus_channel * 1399 vmbus_channel_lookup(struct vmbus_softc *sc, uint32_t chanid) 1400 { 1401 struct vmbus_channel *ch = NULL; 1402 1403 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) { 1404 if (ch->ch_id == chanid) 1405 return ch; 1406 } 1407 return NULL; 1408 } 1409 1410 static int 1411 vmbus_channel_ring_create(struct vmbus_channel *ch, uint32_t buflen) 1412 { 1413 struct vmbus_softc *sc = ch->ch_sc; 1414 1415 buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring); 1416 ch->ch_ring_size = 2 * buflen; 1417 /* page aligned memory */ 1418 ch->ch_ring = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_ring_dma, 1419 ch->ch_ring_size, PAGE_SIZE, 0, 1); 1420 if (ch->ch_ring == NULL) { 1421 device_printf(sc->sc_dev, 1422 "failed to allocate channel ring\n"); 1423 return ENOMEM; 1424 } 1425 1426 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1427 ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring; 1428 ch->ch_wrd.rd_size = buflen; 1429 ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1430 mutex_init(&ch->ch_wrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1431 1432 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1433 ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring + 1434 buflen); 1435 ch->ch_rrd.rd_size = buflen; 1436 ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring); 1437 mutex_init(&ch->ch_rrd.rd_lock, MUTEX_DEFAULT, IPL_NET); 1438 1439 if (vmbus_handle_alloc(ch, &ch->ch_ring_dma, ch->ch_ring_size, 1440 &ch->ch_ring_gpadl)) { 1441 device_printf(sc->sc_dev, 1442 "failed to obtain a PA handle for the ring\n"); 1443 vmbus_channel_ring_destroy(ch); 1444 return ENOMEM; 1445 } 1446 1447 return 0; 1448 } 1449 1450 static void 1451 vmbus_channel_ring_destroy(struct vmbus_channel *ch) 1452 { 1453 struct vmbus_softc *sc = ch->ch_sc; 1454 1455 hyperv_dma_free(sc->sc_dmat, &ch->ch_ring_dma); 1456 ch->ch_ring = NULL; 1457 vmbus_handle_free(ch, 
ch->ch_ring_gpadl); 1458 1459 mutex_destroy(&ch->ch_wrd.rd_lock); 1460 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd)); 1461 mutex_destroy(&ch->ch_rrd.rd_lock); 1462 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd)); 1463 } 1464 1465 int 1466 vmbus_channel_open(struct vmbus_channel *ch, size_t buflen, void *udata, 1467 size_t udatalen, void (*handler)(void *), void *arg) 1468 { 1469 struct vmbus_softc *sc = ch->ch_sc; 1470 struct vmbus_chanmsg_chopen cmd; 1471 struct vmbus_chanmsg_chopen_resp rsp; 1472 int rv = EINVAL; 1473 1474 if (ch->ch_ring == NULL && 1475 (rv = vmbus_channel_ring_create(ch, buflen))) { 1476 DPRINTF("%s: failed to create channel ring\n", 1477 device_xname(sc->sc_dev)); 1478 return rv; 1479 } 1480 1481 __insn_barrier(); 1482 sc->sc_chanmap[ch->ch_id] = ch; 1483 1484 memset(&cmd, 0, sizeof(cmd)); 1485 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN; 1486 cmd.chm_openid = ch->ch_id; 1487 cmd.chm_chanid = ch->ch_id; 1488 cmd.chm_gpadl = ch->ch_ring_gpadl; 1489 cmd.chm_txbr_pgcnt = atop(ch->ch_wrd.rd_size); 1490 cmd.chm_vcpuid = ch->ch_vcpu; 1491 if (udata && udatalen > 0) 1492 memcpy(cmd.chm_udata, udata, udatalen); 1493 1494 memset(&rsp, 0, sizeof(rsp)); 1495 1496 ch->ch_handler = handler; 1497 ch->ch_ctx = arg; 1498 ch->ch_state = VMBUS_CHANSTATE_OPENED; 1499 1500 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), HCF_NOSLEEP); 1501 if (rv) { 1502 sc->sc_chanmap[ch->ch_id] = NULL; 1503 vmbus_channel_ring_destroy(ch); 1504 DPRINTF("%s: CHOPEN failed with %d\n", device_xname(sc->sc_dev), 1505 rv); 1506 ch->ch_handler = NULL; 1507 ch->ch_ctx = NULL; 1508 ch->ch_state = VMBUS_CHANSTATE_OFFERED; 1509 return rv; 1510 } 1511 return 0; 1512 } 1513 1514 static void 1515 vmbus_channel_detach(struct vmbus_channel *ch) 1516 { 1517 u_int refs; 1518 1519 KASSERTMSG(ch->ch_refs > 0, "channel%u: invalid refcnt %d", 1520 ch->ch_id, ch->ch_refs); 1521 1522 membar_release(); 1523 refs = atomic_dec_uint_nv(&ch->ch_refs); 1524 if (refs == 0) { 1525 membar_acquire(); 1526 /* Detach the target channel. 
*/ 1527 vmbus_devq_enqueue(ch->ch_sc, VMBUS_DEV_TYPE_DETACH, ch); 1528 } 1529 } 1530 1531 static int 1532 vmbus_channel_close_internal(struct vmbus_channel *ch) 1533 { 1534 struct vmbus_softc *sc = ch->ch_sc; 1535 struct vmbus_chanmsg_chclose cmd; 1536 int rv; 1537 1538 sc->sc_chanmap[ch->ch_id] = NULL; 1539 1540 memset(&cmd, 0, sizeof(cmd)); 1541 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE; 1542 cmd.chm_chanid = ch->ch_id; 1543 1544 ch->ch_state = VMBUS_CHANSTATE_CLOSING; 1545 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0, 1546 HCF_NOREPLY | HCF_NOSLEEP); 1547 if (rv) { 1548 DPRINTF("%s: CHCLOSE failed with %d\n", 1549 device_xname(sc->sc_dev), rv); 1550 return rv; 1551 } 1552 ch->ch_state = VMBUS_CHANSTATE_CLOSED; 1553 vmbus_channel_ring_destroy(ch); 1554 return 0; 1555 } 1556 1557 int 1558 vmbus_channel_close_direct(struct vmbus_channel *ch) 1559 { 1560 int rv; 1561 1562 rv = vmbus_channel_close_internal(ch); 1563 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1564 vmbus_channel_detach(ch); 1565 return rv; 1566 } 1567 1568 int 1569 vmbus_channel_close(struct vmbus_channel *ch) 1570 { 1571 struct vmbus_channel **subch; 1572 int i, cnt, rv; 1573 1574 if (!VMBUS_CHAN_ISPRIMARY(ch)) 1575 return 0; 1576 1577 cnt = ch->ch_subchannel_count; 1578 if (cnt > 0) { 1579 subch = vmbus_subchannel_get(ch, cnt); 1580 for (i = 0; i < ch->ch_subchannel_count; i++) { 1581 rv = vmbus_channel_close_internal(subch[i]); 1582 (void) rv; /* XXX */ 1583 vmbus_channel_detach(ch); 1584 } 1585 vmbus_subchannel_rel(subch, cnt); 1586 } 1587 1588 return vmbus_channel_close_internal(ch); 1589 } 1590 1591 static inline void 1592 vmbus_channel_setevent(struct vmbus_softc *sc, struct vmbus_channel *ch) 1593 { 1594 struct vmbus_mon_trig *mtg; 1595 1596 /* Each uint32_t represents 32 channels */ 1597 set_bit(ch->ch_id, sc->sc_wevents); 1598 if (ch->ch_flags & CHF_MONITOR) { 1599 mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup]; 1600 set_bit(ch->ch_mindex, &mtg->mt_pending); 1601 } else 1602 vmbus_intr_signal(sc, hyperv_dma_get_paddr(&ch->ch_monprm_dma)); 1603 } 1604 1605 static void 1606 vmbus_channel_intr(void *arg) 1607 { 1608 struct vmbus_channel *ch = arg; 1609 1610 if (vmbus_channel_ready(ch)) 1611 ch->ch_handler(ch->ch_ctx); 1612 1613 if (vmbus_channel_unpause(ch) == 0) 1614 return; 1615 1616 vmbus_channel_pause(ch); 1617 vmbus_channel_schedule(ch); 1618 } 1619 1620 int 1621 vmbus_channel_setdeferred(struct vmbus_channel *ch, const char *name) 1622 { 1623 1624 ch->ch_taskq = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 1625 vmbus_channel_intr, ch); 1626 if (ch->ch_taskq == NULL) 1627 return -1; 1628 return 0; 1629 } 1630 1631 void 1632 vmbus_channel_schedule(struct vmbus_channel *ch) 1633 { 1634 1635 if (ch->ch_handler) { 1636 if (!cold && (ch->ch_flags & CHF_BATCHED)) { 1637 vmbus_channel_pause(ch); 1638 softint_schedule(ch->ch_taskq); 1639 } else 1640 ch->ch_handler(ch->ch_ctx); 1641 } 1642 } 1643 1644 static __inline void 1645 vmbus_ring_put(struct vmbus_ring_data *wrd, uint8_t *data, uint32_t datalen) 1646 { 1647 int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod); 1648 1649 memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left); 1650 memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left); 1651 wrd->rd_prod += datalen; 1652 if (wrd->rd_prod >= wrd->rd_dsize) 1653 wrd->rd_prod -= wrd->rd_dsize; 1654 } 1655 1656 static inline void 1657 vmbus_ring_get(struct vmbus_ring_data *rrd, uint8_t *data, uint32_t datalen, 1658 int peek) 1659 { 1660 int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons); 1661 1662 memcpy(data, 
&rrd->rd_ring->br_data[rrd->rd_cons], left); 1663 memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left); 1664 if (!peek) { 1665 rrd->rd_cons += datalen; 1666 if (rrd->rd_cons >= rrd->rd_dsize) 1667 rrd->rd_cons -= rrd->rd_dsize; 1668 } 1669 } 1670 1671 static __inline void 1672 vmbus_ring_avail(struct vmbus_ring_data *rd, uint32_t *towrite, 1673 uint32_t *toread) 1674 { 1675 /* XXX bus_dmamap_sync(POSTREAD) on br_rindex/br_windex */ 1676 uint32_t ridx = rd->rd_ring->br_rindex; 1677 uint32_t widx = rd->rd_ring->br_windex; 1678 /* XXX bus_dmamap_sync(PREREAD) on br_rindex/br_windex */ 1679 uint32_t r, w; 1680 1681 if (widx >= ridx) 1682 w = rd->rd_dsize - (widx - ridx); 1683 else 1684 w = ridx - widx; 1685 r = rd->rd_dsize - w; 1686 if (towrite) 1687 *towrite = w; 1688 if (toread) 1689 *toread = r; 1690 } 1691 1692 static bool 1693 vmbus_ring_is_empty(struct vmbus_ring_data *rd) 1694 { 1695 1696 /* XXX bus_dmamap_sync(POSTREAD) on br_rindex/br_windex */ 1697 return rd->rd_ring->br_rindex == rd->rd_ring->br_windex; 1698 /* XXX bus_dmamap_sync(PREREAD) on br_rindex/br_windex */ 1699 } 1700 1701 static int 1702 vmbus_ring_write(struct vmbus_ring_data *wrd, struct iovec *iov, int iov_cnt, 1703 int *needsig) 1704 { 1705 uint64_t indices = 0; 1706 uint32_t avail, oprod, datalen = sizeof(indices); 1707 int i; 1708 1709 for (i = 0; i < iov_cnt; i++) 1710 datalen += iov[i].iov_len; 1711 1712 KASSERT(datalen <= wrd->rd_dsize); 1713 1714 vmbus_ring_avail(wrd, &avail, NULL); 1715 if (avail <= datalen) { 1716 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1717 return EAGAIN; 1718 } 1719 1720 oprod = wrd->rd_prod; 1721 1722 /* XXX bus_dmamap_sync(POSTWRITE) on ring data */ 1723 1724 for (i = 0; i < iov_cnt; i++) 1725 vmbus_ring_put(wrd, iov[i].iov_base, iov[i].iov_len); 1726 1727 indices = (uint64_t)oprod << 32; 1728 vmbus_ring_put(wrd, (uint8_t *)&indices, sizeof(indices)); 1729 1730 /* XXX bus_dmamap_sync(PREWRITE) on ring data */ 1731 1732 membar_sync(); /* XXX bus_dmamap_sync(POSTWRITE) on br_windex */ 1733 wrd->rd_ring->br_windex = wrd->rd_prod; 1734 /* XXX bus_dmamap_sync(PREWRITE) on br_windex */ 1735 1736 /* 1737 * Ensure we publish the producer index _before_ we check 1738 * whether the host needs to be notified. 
1739 */ 1740 paravirt_membar_sync(); 1741 1742 /* XXX bus_dmamap_sync(POSTREAD) on br_rindex */ 1743 1744 /* Signal when the ring transitions from being empty to non-empty */ 1745 if (wrd->rd_ring->br_imask == 0 && 1746 wrd->rd_ring->br_rindex == oprod) 1747 *needsig = 1; 1748 else 1749 *needsig = 0; 1750 1751 /* XXX bus_dmamap_sync(PREREAD) on br_rindex */ 1752 1753 return 0; 1754 } 1755 1756 int 1757 vmbus_channel_send(struct vmbus_channel *ch, void *data, uint32_t datalen, 1758 uint64_t rid, int type, uint32_t flags) 1759 { 1760 struct vmbus_softc *sc = ch->ch_sc; 1761 struct vmbus_chanpkt cp; 1762 struct iovec iov[3]; 1763 uint32_t pktlen, pktlen_aligned; 1764 uint64_t zeropad = 0; 1765 int rv, needsig = 0; 1766 1767 pktlen = sizeof(cp) + datalen; 1768 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1769 1770 cp.cp_hdr.cph_type = type; 1771 cp.cp_hdr.cph_flags = flags; 1772 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp)); 1773 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1774 cp.cp_hdr.cph_tid = rid; 1775 1776 iov[0].iov_base = &cp; 1777 iov[0].iov_len = sizeof(cp); 1778 1779 iov[1].iov_base = data; 1780 iov[1].iov_len = datalen; 1781 1782 iov[2].iov_base = &zeropad; 1783 iov[2].iov_len = pktlen_aligned - pktlen; 1784 1785 mutex_enter(&ch->ch_wrd.rd_lock); 1786 rv = vmbus_ring_write(&ch->ch_wrd, iov, 3, &needsig); 1787 mutex_exit(&ch->ch_wrd.rd_lock); 1788 if (rv == 0 && needsig) 1789 vmbus_channel_setevent(sc, ch); 1790 1791 return rv; 1792 } 1793 1794 int 1795 vmbus_channel_send_sgl(struct vmbus_channel *ch, struct vmbus_gpa *sgl, 1796 uint32_t nsge, void *data, uint32_t datalen, uint64_t rid) 1797 { 1798 struct vmbus_softc *sc = ch->ch_sc; 1799 struct vmbus_chanpkt_sglist cp; 1800 struct iovec iov[4]; 1801 uint32_t buflen, pktlen, pktlen_aligned; 1802 uint64_t zeropad = 0; 1803 int rv, needsig = 0; 1804 1805 buflen = sizeof(struct vmbus_gpa) * nsge; 1806 pktlen = sizeof(cp) + datalen + buflen; 1807 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1808 1809 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1810 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1811 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 1812 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1813 cp.cp_hdr.cph_tid = rid; 1814 cp.cp_gpa_cnt = nsge; 1815 cp.cp_rsvd = 0; 1816 1817 iov[0].iov_base = &cp; 1818 iov[0].iov_len = sizeof(cp); 1819 1820 iov[1].iov_base = sgl; 1821 iov[1].iov_len = buflen; 1822 1823 iov[2].iov_base = data; 1824 iov[2].iov_len = datalen; 1825 1826 iov[3].iov_base = &zeropad; 1827 iov[3].iov_len = pktlen_aligned - pktlen; 1828 1829 mutex_enter(&ch->ch_wrd.rd_lock); 1830 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1831 mutex_exit(&ch->ch_wrd.rd_lock); 1832 if (rv == 0 && needsig) 1833 vmbus_channel_setevent(sc, ch); 1834 1835 return rv; 1836 } 1837 1838 int 1839 vmbus_channel_send_prpl(struct vmbus_channel *ch, struct vmbus_gpa_range *prpl, 1840 uint32_t nprp, void *data, uint32_t datalen, uint64_t rid) 1841 { 1842 struct vmbus_softc *sc = ch->ch_sc; 1843 struct vmbus_chanpkt_prplist cp; 1844 struct iovec iov[4]; 1845 uint32_t buflen, pktlen, pktlen_aligned; 1846 uint64_t zeropad = 0; 1847 int rv, needsig = 0; 1848 1849 buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1); 1850 pktlen = sizeof(cp) + datalen + buflen; 1851 pktlen_aligned = roundup(pktlen, sizeof(uint64_t)); 1852 1853 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA; 1854 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC; 1855 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen); 
1856 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned); 1857 cp.cp_hdr.cph_tid = rid; 1858 cp.cp_range_cnt = 1; 1859 cp.cp_rsvd = 0; 1860 1861 iov[0].iov_base = &cp; 1862 iov[0].iov_len = sizeof(cp); 1863 1864 iov[1].iov_base = prpl; 1865 iov[1].iov_len = buflen; 1866 1867 iov[2].iov_base = data; 1868 iov[2].iov_len = datalen; 1869 1870 iov[3].iov_base = &zeropad; 1871 iov[3].iov_len = pktlen_aligned - pktlen; 1872 1873 mutex_enter(&ch->ch_wrd.rd_lock); 1874 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig); 1875 mutex_exit(&ch->ch_wrd.rd_lock); 1876 if (rv == 0 && needsig) 1877 vmbus_channel_setevent(sc, ch); 1878 1879 return rv; 1880 } 1881 1882 static int 1883 vmbus_ring_peek(struct vmbus_ring_data *rrd, void *data, uint32_t datalen) 1884 { 1885 uint32_t avail; 1886 1887 KASSERT(datalen <= rrd->rd_dsize); 1888 1889 vmbus_ring_avail(rrd, NULL, &avail); 1890 if (avail < datalen) 1891 return EAGAIN; 1892 1893 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 1); 1894 return 0; 1895 } 1896 1897 static int 1898 vmbus_ring_read(struct vmbus_ring_data *rrd, void *data, uint32_t datalen, 1899 uint32_t offset) 1900 { 1901 uint64_t indices; 1902 uint32_t avail; 1903 1904 KASSERT(datalen <= rrd->rd_dsize); 1905 1906 vmbus_ring_avail(rrd, NULL, &avail); 1907 if (avail < datalen) { 1908 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen); 1909 return EAGAIN; 1910 } 1911 1912 /* XXX bus_dmamap_sync(POSTREAD) on ring data */ 1913 1914 if (offset) { 1915 rrd->rd_cons += offset; 1916 if (rrd->rd_cons >= rrd->rd_dsize) 1917 rrd->rd_cons -= rrd->rd_dsize; 1918 } 1919 1920 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 0); 1921 vmbus_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0); 1922 1923 /* XXX bus_dmamap_sync(PREREAD) on ring data */ 1924 1925 membar_sync(); /* XXX bus_dmamap_sync(POSTWRITE) on br_rindex */ 1926 rrd->rd_ring->br_rindex = rrd->rd_cons; 1927 /* XXX bus_dmamap_sync(PREWRITE) on br_rindex */ 1928 1929 return 0; 1930 } 1931 1932 int 1933 vmbus_channel_recv(struct vmbus_channel *ch, void *data, uint32_t datalen, 1934 uint32_t *rlen, uint64_t *rid, int raw) 1935 { 1936 struct vmbus_softc *sc = ch->ch_sc; 1937 struct vmbus_chanpkt_hdr cph; 1938 uint32_t offset, pktlen; 1939 int rv; 1940 1941 *rlen = 0; 1942 1943 mutex_enter(&ch->ch_rrd.rd_lock); 1944 1945 if ((rv = vmbus_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) { 1946 mutex_exit(&ch->ch_rrd.rd_lock); 1947 return rv; 1948 } 1949 1950 offset = raw ? 
0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen); 1951 pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset; 1952 if (pktlen > datalen) { 1953 mutex_exit(&ch->ch_rrd.rd_lock); 1954 device_printf(sc->sc_dev, "%s: pktlen %u datalen %u\n", 1955 __func__, pktlen, datalen); 1956 return EINVAL; 1957 } 1958 1959 rv = vmbus_ring_read(&ch->ch_rrd, data, pktlen, offset); 1960 if (rv == 0) { 1961 *rlen = pktlen; 1962 *rid = cph.cph_tid; 1963 } 1964 1965 mutex_exit(&ch->ch_rrd.rd_lock); 1966 1967 return rv; 1968 } 1969 1970 static inline void 1971 vmbus_ring_mask(struct vmbus_ring_data *rd) 1972 { 1973 1974 membar_sync(); /* XXX bus_dmamap_sync(POSTWRITE) on br_imask */ 1975 rd->rd_ring->br_imask = 1; 1976 membar_sync(); /* XXX bus_dmamap_sync(PREWRITE) on br_imask */ 1977 } 1978 1979 static inline void 1980 vmbus_ring_unmask(struct vmbus_ring_data *rd) 1981 { 1982 1983 membar_sync(); /* XXX bus_dmamap_sync(POSTWRITE) on br_imask */ 1984 rd->rd_ring->br_imask = 0; 1985 membar_sync(); /* XXX bus_dmamap_sync(PREWRITE) on br_imask */ 1986 } 1987 1988 void 1989 vmbus_channel_pause(struct vmbus_channel *ch) 1990 { 1991 1992 atomic_or_ulong(&ch->ch_sc->sc_evtmask[ch->ch_id / VMBUS_EVTFLAG_LEN], 1993 __BIT(ch->ch_id % VMBUS_EVTFLAG_LEN)); 1994 vmbus_ring_mask(&ch->ch_rrd); 1995 } 1996 1997 uint32_t 1998 vmbus_channel_unpause(struct vmbus_channel *ch) 1999 { 2000 uint32_t avail; 2001 2002 atomic_and_ulong(&ch->ch_sc->sc_evtmask[ch->ch_id / VMBUS_EVTFLAG_LEN], 2003 ~__BIT(ch->ch_id % VMBUS_EVTFLAG_LEN)); 2004 vmbus_ring_unmask(&ch->ch_rrd); 2005 2006 /* 2007 * Ensure we announce to the host side that we are accepting 2008 * interrupts _before_ we check whether any pending events had 2009 * come over the ring while we weren't accepting interrupts. 2010 */ 2011 paravirt_membar_sync(); 2012 2013 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 2014 2015 return avail; 2016 } 2017 2018 uint32_t 2019 vmbus_channel_ready(struct vmbus_channel *ch) 2020 { 2021 uint32_t avail; 2022 2023 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail); 2024 2025 return avail; 2026 } 2027 2028 bool 2029 vmbus_channel_tx_empty(struct vmbus_channel *ch) 2030 { 2031 2032 return vmbus_ring_is_empty(&ch->ch_wrd); 2033 } 2034 2035 bool 2036 vmbus_channel_rx_empty(struct vmbus_channel *ch) 2037 { 2038 2039 return vmbus_ring_is_empty(&ch->ch_rrd); 2040 } 2041 2042 /* How many PFNs can be referenced by the header */ 2043 #define VMBUS_NPFNHDR ((VMBUS_MSG_DSIZE_MAX - \ 2044 sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t)) 2045 2046 /* How many PFNs can be referenced by the body */ 2047 #define VMBUS_NPFNBODY ((VMBUS_MSG_DSIZE_MAX - \ 2048 sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t)) 2049 2050 int 2051 vmbus_handle_alloc(struct vmbus_channel *ch, const struct hyperv_dma *dma, 2052 uint32_t buflen, uint32_t *handle) 2053 { 2054 struct vmbus_softc *sc = ch->ch_sc; 2055 struct vmbus_chanmsg_gpadl_conn *hdr; 2056 struct vmbus_chanmsg_gpadl_subconn *cmd; 2057 struct vmbus_chanmsg_gpadl_connresp rsp; 2058 struct vmbus_msg *msg; 2059 int i, j, last, left, rv; 2060 int bodylen = 0, ncmds = 0, pfn = 0; 2061 uint64_t *frames; 2062 paddr_t pa; 2063 uint8_t *body; 2064 /* Total number of pages to reference */ 2065 int total = atop(buflen); 2066 /* Number of pages that will fit the header */ 2067 int inhdr = MIN(total, VMBUS_NPFNHDR); 2068 2069 KASSERT((buflen & PAGE_MASK) == 0); 2070 KASSERT(buflen == (uint32_t)dma->map->dm_mapsize); 2071 2072 msg = pool_cache_get_paddr(sc->sc_msgpool, PR_WAITOK, &pa); 2073 2074 /* Prepare array of frame addresses 
	/* Prepare array of frame addresses */
	frames = kmem_zalloc(total * sizeof(*frames), KM_SLEEP);
	for (i = 0, j = 0; i < dma->map->dm_nsegs && j < total; i++) {
		bus_dma_segment_t *seg = &dma->map->dm_segs[i];
		bus_addr_t addr = seg->ds_addr;

		KASSERT((addr & PAGE_MASK) == 0);
		KASSERT((seg->ds_len & PAGE_MASK) == 0);

		while (addr < seg->ds_addr + seg->ds_len && j < total) {
			frames[j++] = atop(addr);
			addr += PAGE_SIZE;
		}
	}

	memset(msg, 0, sizeof(*msg));
	msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) +
	    inhdr * sizeof(uint64_t);
	hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data;
	msg->msg_rsp = &rsp;
	msg->msg_rsplen = sizeof(rsp);
	msg->msg_flags = MSGF_NOSLEEP;

	left = total - inhdr;

	/* Allocate additional gpadl_body structures if required */
	if (left > 0) {
		ncmds = howmany(left, VMBUS_NPFNBODY);
		bodylen = ncmds * VMBUS_MSG_DSIZE_MAX;
		body = kmem_zalloc(bodylen, KM_SLEEP);
	}

	*handle = atomic_inc_32_nv(&sc->sc_handle);

	hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN;
	hdr->chm_chanid = ch->ch_id;
	hdr->chm_gpadl = *handle;

	/* Single range for a contiguous buffer */
	hdr->chm_range_cnt = 1;
	hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total *
	    sizeof(uint64_t);
	hdr->chm_range.gpa_ofs = 0;
	hdr->chm_range.gpa_len = buflen;

	/* Fit as many pages as possible into the header */
	for (i = 0; i < inhdr; i++)
		hdr->chm_range.gpa_page[i] = frames[pfn++];

	for (i = 0; i < ncmds; i++) {
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN;
		cmd->chm_gpadl = *handle;
		last = MIN(left, VMBUS_NPFNBODY);
		for (j = 0; j < last; j++)
			cmd->chm_gpa_page[j] = frames[pfn++];
		left -= last;
	}

	rv = vmbus_start(sc, msg, pa);
	if (rv != 0) {
		DPRINTF("%s: GPADL_CONN failed\n", device_xname(sc->sc_dev));
		goto out;
	}
	for (i = 0; i < ncmds; i++) {
		int cmdlen = sizeof(*cmd);
		cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
		    VMBUS_MSG_DSIZE_MAX * i);
		/* Last element can be short */
		if (i == ncmds - 1)
			cmdlen += last * sizeof(uint64_t);
		else
			cmdlen += VMBUS_NPFNBODY * sizeof(uint64_t);
		rv = vmbus_cmd(sc, cmd, cmdlen, NULL, 0,
		    HCF_NOREPLY | HCF_NOSLEEP);
		if (rv != 0) {
			DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed "
			    "with %d\n", device_xname(sc->sc_dev), i, ncmds,
			    rv);
			goto out;
		}
	}
	rv = vmbus_reply(sc, msg);
	if (rv != 0) {
		DPRINTF("%s: GPADL allocation failed with %d\n",
		    device_xname(sc->sc_dev), rv);
	}

out:
	if (bodylen > 0)
		kmem_free(body, bodylen);
	kmem_free(frames, total * sizeof(*frames));
	pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
	if (rv)
		return rv;

	KASSERT(*handle == rsp.chm_gpadl);

	return 0;
}

/* Tear down a GPADL previously established with vmbus_handle_alloc(). */
void
vmbus_handle_free(struct vmbus_channel *ch, uint32_t handle)
{
	struct vmbus_softc *sc = ch->ch_sc;
	struct vmbus_chanmsg_gpadl_disconn cmd;
	struct vmbus_chanmsg_gpadl_disconn rsp;
	int rv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN;
	cmd.chm_chanid = ch->ch_id;
	cmd.chm_gpadl = handle;

	rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), HCF_NOSLEEP);
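	/*
	 * vmbus_cmd() without HCF_NOREPLY waits for the host's
	 * GPADL_DISCONNRESP before returning, so on success the
	 * descriptor list is gone on the host side by the time we get
	 * here.  On failure there is nothing left to unwind; just log.
	 */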
	if (rv) {
		DPRINTF("%s: GPADL_DISCONN failed with %d\n",
		    device_xname(sc->sc_dev), rv);
	}
}

/*
 * Queue a channel offer/rescind event for the chevq thread; may be
 * called from interrupt context, hence kmem_intr_alloc().
 */
static void
vmbus_chevq_enqueue(struct vmbus_softc *sc, int type, void *arg)
{
	struct vmbus_chev *vce;

	vce = kmem_intr_alloc(sizeof(*vce), KM_NOSLEEP);
	if (vce == NULL) {
		device_printf(sc->sc_dev, "failed to allocate chev\n");
		return;
	}

	vce->vce_type = type;
	vce->vce_arg = arg;

	mutex_enter(&sc->sc_chevq_lock);
	SIMPLEQ_INSERT_TAIL(&sc->sc_chevq, vce, vce_entry);
	cv_broadcast(&sc->sc_chevq_cv);
	mutex_exit(&sc->sc_chevq_lock);
}

/*
 * Drain the channel event queue.  Called with sc_chevq_lock held;
 * the lock is dropped around each handler invocation.
 */
static void
vmbus_process_chevq(void *arg)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_chev *vce;
	struct vmbus_chanmsg_choffer *co;
	struct vmbus_chanmsg_chrescind *cr;

	KASSERT(mutex_owned(&sc->sc_chevq_lock));

	while (!SIMPLEQ_EMPTY(&sc->sc_chevq)) {
		vce = SIMPLEQ_FIRST(&sc->sc_chevq);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_chevq, vce_entry);
		mutex_exit(&sc->sc_chevq_lock);

		switch (vce->vce_type) {
		case VMBUS_CHEV_TYPE_OFFER:
			co = vce->vce_arg;
			vmbus_process_offer(sc, co);
			kmem_free(co, sizeof(*co));
			break;

		case VMBUS_CHEV_TYPE_RESCIND:
			cr = vce->vce_arg;
			vmbus_process_rescind(sc, cr);
			kmem_free(cr, sizeof(*cr));
			break;

		default:
			DPRINTF("%s: unknown chevq type %d\n",
			    device_xname(sc->sc_dev), vce->vce_type);
			break;
		}
		kmem_free(vce, sizeof(*vce));

		mutex_enter(&sc->sc_chevq_lock);
	}
}

static void
vmbus_chevq_thread(void *arg)
{
	struct vmbus_softc *sc = arg;

	mutex_enter(&sc->sc_chevq_lock);
	for (;;) {
		if (SIMPLEQ_EMPTY(&sc->sc_chevq)) {
			cv_wait(&sc->sc_chevq_cv, &sc->sc_chevq_lock);
			continue;
		}

		vmbus_process_chevq(sc);
	}
	mutex_exit(&sc->sc_chevq_lock);

	kthread_exit(0);
}

/*
 * Queue an attach/detach event for a channel; primary channels and
 * subchannels are serviced by separate threads.
 */
static void
vmbus_devq_enqueue(struct vmbus_softc *sc, int type, struct vmbus_channel *ch)
{
	struct vmbus_dev *vd;

	vd = kmem_zalloc(sizeof(*vd), KM_SLEEP);	/* KM_SLEEP cannot fail */

	vd->vd_type = type;
	vd->vd_chan = ch;

	if (VMBUS_CHAN_ISPRIMARY(ch)) {
		mutex_enter(&sc->sc_devq_lock);
		SIMPLEQ_INSERT_TAIL(&sc->sc_devq, vd, vd_entry);
		cv_broadcast(&sc->sc_devq_cv);
		mutex_exit(&sc->sc_devq_lock);
	} else {
		mutex_enter(&sc->sc_subch_devq_lock);
		SIMPLEQ_INSERT_TAIL(&sc->sc_subch_devq, vd, vd_entry);
		cv_broadcast(&sc->sc_subch_devq_cv);
		mutex_exit(&sc->sc_subch_devq_lock);
	}
}

static void
vmbus_process_devq(void *arg)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_dev *vd;
	struct vmbus_channel *ch;
	struct vmbus_attach_args vaa;

	KASSERT(mutex_owned(&sc->sc_devq_lock));

	while (!SIMPLEQ_EMPTY(&sc->sc_devq)) {
		vd = SIMPLEQ_FIRST(&sc->sc_devq);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_devq, vd_entry);
		mutex_exit(&sc->sc_devq_lock);

		switch (vd->vd_type) {
		case VMBUS_DEV_TYPE_ATTACH:
			ch = vd->vd_chan;
			vaa.aa_type = &ch->ch_type;
			vaa.aa_inst = &ch->ch_inst;
			vaa.aa_ident = ch->ch_ident;
			vaa.aa_chan = ch;
			vaa.aa_iot = sc->sc_iot;
			vaa.aa_memt = sc->sc_memt;
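			/*
			 * Hand the fully described channel to
			 * autoconf(9): config_found() attaches a
			 * matching child driver, while unclaimed
			 * offers are reported via vmbus_attach_print().
			 */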
			ch->ch_dev = config_found(sc->sc_dev,
			    &vaa, vmbus_attach_print, CFARGS_NONE);
			break;

		case VMBUS_DEV_TYPE_DETACH:
			ch = vd->vd_chan;
			if (ch->ch_dev != NULL) {
				config_detach(ch->ch_dev, DETACH_FORCE);
				ch->ch_dev = NULL;
			}
			vmbus_channel_release(ch);
			vmbus_channel_free(ch);
			break;

		default:
			DPRINTF("%s: unknown devq type %d\n",
			    device_xname(sc->sc_dev), vd->vd_type);
			break;
		}
		kmem_free(vd, sizeof(*vd));

		mutex_enter(&sc->sc_devq_lock);
	}
}

static void
vmbus_devq_thread(void *arg)
{
	struct vmbus_softc *sc = arg;

	mutex_enter(&sc->sc_devq_lock);
	for (;;) {
		if (SIMPLEQ_EMPTY(&sc->sc_devq)) {
			cv_wait(&sc->sc_devq_cv, &sc->sc_devq_lock);
			continue;
		}

		vmbus_process_devq(sc);
	}
	mutex_exit(&sc->sc_devq_lock);

	kthread_exit(0);
}

static void
vmbus_subchannel_devq_thread(void *arg)
{
	struct vmbus_softc *sc = arg;
	struct vmbus_dev *vd;
	struct vmbus_channel *ch, *prich;

	mutex_enter(&sc->sc_subch_devq_lock);
	for (;;) {
		if (SIMPLEQ_EMPTY(&sc->sc_subch_devq)) {
			cv_wait(&sc->sc_subch_devq_cv,
			    &sc->sc_subch_devq_lock);
			continue;
		}

		while (!SIMPLEQ_EMPTY(&sc->sc_subch_devq)) {
			vd = SIMPLEQ_FIRST(&sc->sc_subch_devq);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_subch_devq, vd_entry);
			mutex_exit(&sc->sc_subch_devq_lock);

			switch (vd->vd_type) {
			case VMBUS_DEV_TYPE_ATTACH:
				/* Nothing to do */
				break;

			case VMBUS_DEV_TYPE_DETACH:
				ch = vd->vd_chan;

				vmbus_channel_release(ch);

				prich = ch->ch_primary_channel;
				mutex_enter(&prich->ch_subchannel_lock);
				TAILQ_REMOVE(&prich->ch_subchannels, ch,
				    ch_subentry);
				prich->ch_subchannel_count--;
				cv_signal(&prich->ch_subchannel_cv);
				mutex_exit(&prich->ch_subchannel_lock);

				vmbus_channel_free(ch);
				break;

			default:
				DPRINTF("%s: unknown devq type %d\n",
				    device_xname(sc->sc_dev), vd->vd_type);
				break;
			}

			kmem_free(vd, sizeof(*vd));

			mutex_enter(&sc->sc_subch_devq_lock);
		}
	}
	mutex_exit(&sc->sc_subch_devq_lock);

	kthread_exit(0);
}

static int
vmbus_attach_print(void *aux, const char *name)
{
	struct vmbus_attach_args *aa = aux;

	if (name)
		printf("\"%s\" at %s", aa->aa_ident, name);

	return UNCONF;
}

MODULE(MODULE_CLASS_DRIVER, vmbus, "hyperv");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
vmbus_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_vmbus,
		    cfattach_ioconf_vmbus, cfdata_ioconf_vmbus);
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_vmbus,
		    cfattach_ioconf_vmbus, cfdata_ioconf_vmbus);
#endif
		break;

	default:
		rv = ENOTTY;
		break;
	}

	return rv;
}
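
/*
 * Editorial sketch (not part of the driver) of how a consumer would
 * use the channel interfaces above.  "example_gpadl",
 * "example_channel_callback" and "example_drain" are hypothetical
 * names, and the callback signature is illustrative only; the real
 * entry points are whatever the child driver registers.
 */
#if 0
/* GPADL lifetime: pair vmbus_handle_alloc() with vmbus_handle_free(). */
static int
example_gpadl(struct vmbus_channel *ch, const struct hyperv_dma *dma,
    uint32_t buflen)
{
	uint32_t gpadl;
	int rv;

	rv = vmbus_handle_alloc(ch, dma, buflen, &gpadl);
	if (rv != 0)
		return rv;
	/* ... use the buffer; the host refers to it by "gpadl" ... */
	vmbus_handle_free(ch, gpadl);
	return 0;
}

/*
 * Rx drain using the pause/unpause primitives: mask ring interrupts,
 * consume pending packets, then unmask.  vmbus_channel_unpause()
 * returns the bytes that became available while the channel was
 * paused, so looping until it returns 0 closes the window in which a
 * host event could otherwise be lost.
 */
static void
example_channel_callback(struct vmbus_channel *ch, void *cookie)
{

	for (;;) {
		vmbus_channel_pause(ch);
		example_drain(ch, cookie);	/* hypothetical helper */
		if (vmbus_channel_unpause(ch) == 0)
			break;	/* nothing arrived while paused */
	}
}
#endif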