/*	$NetBSD: subr_ipi.c,v 1.1 2014/05/19 22:47:54 rmind Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inter-processor interrupt (IPI) interface with cross-call support.
 */
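
/*
 * Example usage (a sketch for illustration only; "example_func" and the
 * target "ci" are hypothetical names supplied by the caller):
 *
 *	static void
 *	example_func(void *arg)
 *	{
 *		// Runs on the target CPU, from its IPI handler.
 *	}
 *
 *	ipi_msg_t msg = { .func = example_func, .arg = NULL };
 *
 *	kpreempt_disable();
 *	ipi_unicast(&msg, ci);		// ci must not be curcpu()
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 */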

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.1 2014/05/19 22:47:54 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/ipi.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/lock.h>

/*
 * Per-CPU mailbox for IPI messages: it is a single cache line storing
 * up to IPI_MSG_MAX messages.
 */

#define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
#define	IPI_MSG_MAX	IPI_MSG_SLOTS
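
/* E.g. a 64-byte cache line and 8-byte pointers yield 8 slots per CPU. */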

typedef struct {
	ipi_msg_t *	msg[IPI_MSG_SLOTS];
} ipi_mbox_t;

static ipi_mbox_t *	ipi_mboxes	__read_mostly;
static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;

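/*
 * On uniprocessor kernels there are no remote CPUs, so cpu_ipi() can
 * only legitimately be reached with NULL (the broadcast case) and
 * reduces to an assertion.
 */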
#ifndef MULTIPROCESSOR
#define	cpu_ipi(ci)	KASSERT(ci == NULL)
#endif

void
ipi_sysinit(void)
{
	const size_t len = ncpu * sizeof(ipi_mbox_t);

	/* Allocate per-CPU IPI mailboxes (KM_SLEEP cannot fail). */
	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);

	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
	   "ipi", "full");
}

/*
 * put_msg: insert a message into the mailbox, spin-waiting for a free
 * slot if they are all taken.
 */
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
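	/*
	 * Scan for an empty slot and claim it with a compare-and-swap;
	 * the unlocked check merely avoids needless CAS traffic, since
	 * the CAS alone decides races with concurrent senders.
	 */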
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (__predict_true(mbox->msg[i] == NULL) &&
		    atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}

/*
 * ipi_cpu_handler: process pending IPI messages on the current CPU.
 * Called from machine-dependent code when an IPI arrives.
 */
void
ipi_cpu_handler(void)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = mbox->msg[i]) == NULL) {
			continue;
		}
		mbox->msg[i] = NULL;
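		/*
		 * Release the slot right away, so that concurrent
		 * senders may reuse it while the handler runs.
		 */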

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
		atomic_dec_uint(&msg->_pending);
	}
}

/*
 * ipi_unicast: send an IPI to a single CPU.
 *
 * => The target CPU must be remote, i.e. not the local CPU.
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
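	/* Publish the message contents before making the slot visible. */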
	membar_producer();

	put_msg(&ipi_mboxes[id], msg);
	cpu_ipi(ci);
}

/*
 * ipi_multicast: send an IPI to each CPU in the specified set.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	local = !!kcpuset_isset(target, cpu_index(self));
	msg->_pending = kcpuset_countset(target) - local;
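	/*
	 * Note: _pending counts only the remote CPUs; if the local CPU
	 * is in the target set, its handler runs synchronously below.
	 */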
	membar_producer();

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		if (!kcpuset_isset(target, id)) {
			continue;
		}
		put_msg(&ipi_mboxes[id], msg);
		cpu_ipi(ci);
	}
	if (local) {
		msg->func(msg->arg);
	}
}

/*
 * ipi_broadcast: send an IPI to all CPUs.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_broadcast(ipi_msg_t *msg)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	msg->_pending = ncpu - 1;
	membar_producer();

	/* Broadcast IPIs for remote CPUs. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		put_msg(&ipi_mboxes[id], msg);
	}
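	/* NULL asks the MD cpu_ipi() to interrupt all remote CPUs at once. */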
	cpu_ipi(NULL);

	/* Finally, execute locally. */
	msg->func(msg->arg);
}

/*
 * ipi_wait: spin-wait until the message has been processed by all
 * of its target CPUs.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

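	/* Each target CPU decrements _pending as it completes the handler. */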
	while (msg->_pending) {
		KASSERT(msg->_pending < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}