/* $NetBSD: evtchn.c,v 1.71 2015/03/14 10:49:36 bouyer Exp $ */

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004, K A Fraser.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.71 2015/03/14 10:49:36 bouyer Exp $");

#include "opt_xen.h"
#include "isa.h"
#include "pci.h"

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/reboot.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>

#include <machine/intrdefs.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenfunc.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static kmutex_t evtchn_lock;

/* event handlers */
struct evtsource *evtsource[NR_EVENT_CHANNELS];

/* channel locks */
static kmutex_t evtlock[NR_EVENT_CHANNELS];

/* Reference counts for bindings to event channels XXX: redo for SMP */
static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];

/* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */
static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS];

/* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */
static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS];

/* event-channel <-> VIRQ mapping. */
static int virq_to_evtch[NR_VIRQS];


#if NPCI > 0 || NISA > 0
/* event-channel <-> PIRQ mapping */
static int pirq_to_evtch[NR_PIRQS];
/* PIRQ needing notify */
static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32];
int pirq_interrupt(void *);
physdev_op_t physdev_op_notify = {
	.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY,
};
#endif

int debug_port = -1;

// #define IRQ_DEBUG 4

/* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */
#ifdef MULTIPROCESSOR

/*
 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
 */

int
intr_biglock_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int ret;

	KERNEL_LOCK(1, NULL);

	ret = (*ih->ih_realfun)(ih->ih_realarg);

	KERNEL_UNLOCK_ONE(NULL);

	return ret;
}
#endif /* MULTIPROCESSOR */

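/*
 * Reset all VCPU-IPI, VIRQ and PIRQ to event-channel mappings to the
 * "unbound" state, clear the handler and reference-count tables and
 * mask every event channel.
 */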
void
events_default_setup(void)
{
	int i;

	/* No VCPU -> event mappings. */
	for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
		vcpu_ipi_to_evtch[i] = -1;

	/* No VIRQ_TIMER -> event mappings. */
	for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
		virq_timer_to_evtch[i] = -1;

	/* No VIRQ -> event mappings. */
	for (i = 0; i < NR_VIRQS; i++)
		virq_to_evtch[i] = -1;

#if NPCI > 0 || NISA > 0
	/* No PIRQ -> event mappings. */
	for (i = 0; i < NR_PIRQS; i++)
		pirq_to_evtch[i] = -1;
	for (i = 0; i < NR_EVENT_CHANNELS / 32; i++)
		pirq_needs_unmask_notify[i] = 0;
#endif

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtsource[i] = NULL;
		evtch_bindcount[i] = 0;
		hypervisor_mask_event(i);
	}

}

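/*
 * Finish event-channel setup: bind VIRQ_DEBUG, install its shortcut
 * handler (see evtchn_do_event()) and finally enable interrupts.
 */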
void
events_init(void)
{
	mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE);
	debug_port = bind_virq_to_evtch(VIRQ_DEBUG);

	KASSERT(debug_port != -1);

	aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n",
	    debug_port);
	/*
	 * Don't call event_set_handler(), we'll use a shortcut. Just set
	 * evtsource[] to a non-NULL value so that evtchn_do_event will
	 * be called.
	 */
	evtsource[debug_port] = (void *)-1;
	xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port);
	hypervisor_enable_event(debug_port);

	x86_enable_intr();		/* at long last... */
}

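/*
 * Undo events_init() before a suspend: disable interrupts and tear
 * down the VIRQ_DEBUG binding.
 */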
bool
events_suspend(void)
{
	int evtch;

	x86_disable_intr();

	/* VIRQ_DEBUG is the last interrupt to remove */
	evtch = unbind_virq_from_evtch(VIRQ_DEBUG);

	KASSERT(evtch != -1);

	hypervisor_mask_event(evtch);
	/* Remove the non-NULL value set in events_init() */
	evtsource[evtch] = NULL;
	aprint_verbose("VIRQ_DEBUG interrupt disabled, "
	    "event channel %d removed\n", evtch);

	return true;
}

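/* Re-run events_init() to restore the VIRQ_DEBUG binding after a resume. */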
bool
events_resume (void)
{
	events_init();

	return true;
}

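/*
 * Process one event received on the given channel: account for it, then
 * run every handler that may run at the current IPL, raising ci_ilevel
 * as needed.  Events whose handlers are bound to another CPU are
 * forwarded there instead.  If the current IPL blocks all handlers, the
 * event is recorded as pending and left masked.  On the way out, any
 * interrupts deferred while the IPL was raised are replayed (the C
 * version of spllower() below).
 */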
unsigned int
evtchn_do_event(int evtch, struct intrframe *regs)
{
	struct cpu_info *ci;
	int ilevel;
	struct intrhand *ih;
	int (*ih_fun)(void *, void *);
	uint32_t iplmask;
	int i;
	uint32_t iplbit;

#ifdef DIAGNOSTIC
	if (evtch >= NR_EVENT_CHANNELS) {
		printf("event number %d >= NR_EVENT_CHANNELS\n", evtch);
		panic("evtchn_do_event");
	}
#endif

#ifdef IRQ_DEBUG
	if (evtch == IRQ_DEBUG)
		printf("evtchn_do_event: evtch %d\n", evtch);
#endif
	ci = curcpu();

	/*
	 * Shortcut for the debug handler, we want it to always run,
	 * regardless of the IPL level.
	 */
	if (__predict_false(evtch == debug_port)) {
		xen_debug_handler(NULL);
		hypervisor_enable_event(evtch);
		return 0;
	}

#ifdef DIAGNOSTIC
	if (evtsource[evtch] == NULL) {
		panic("evtchn_do_event: unknown event");
	}
#endif
	ci->ci_data.cpu_nintr++;
	evtsource[evtch]->ev_evcnt.ev_count++;
	ilevel = ci->ci_ilevel;

	if (evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) {
		hypervisor_send_event(evtsource[evtch]->ev_cpu, evtch);
		return 0;
	}

	if (evtsource[evtch]->ev_maxlevel <= ilevel) {
#ifdef IRQ_DEBUG
		if (evtch == IRQ_DEBUG)
			printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
			    evtch, evtsource[evtch]->ev_maxlevel, ilevel);
#endif
		hypervisor_set_ipending(evtsource[evtch]->ev_imask,
		    evtch >> LONG_SHIFT,
		    evtch & LONG_MASK);

		/* leave masked */

		return 0;
	}
	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
	iplmask = evtsource[evtch]->ev_imask;
	sti();
	mutex_spin_enter(&evtlock[evtch]);
	ih = evtsource[evtch]->ev_handlers;
	while (ih != NULL) {
		if (ih->ih_cpu != ci) {
			hypervisor_send_event(ih->ih_cpu, evtch);
			iplmask &= ~IUNMASK(ci, ih->ih_level);
			ih = ih->ih_evt_next;
			continue;
		}
		if (ih->ih_level <= ilevel) {
#ifdef IRQ_DEBUG
			if (evtch == IRQ_DEBUG)
				printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
#endif
			cli();
			hypervisor_set_ipending(iplmask,
			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
			/* leave masked */
			mutex_spin_exit(&evtlock[evtch]);
			goto splx;
		}
		iplmask &= ~IUNMASK(ci, ih->ih_level);
		ci->ci_ilevel = ih->ih_level;
		ih_fun = (void *)ih->ih_fun;
		ih_fun(ih->ih_arg, regs);
		ih = ih->ih_evt_next;
	}
	mutex_spin_exit(&evtlock[evtch]);
	cli();
	hypervisor_enable_event(evtch);
splx:
	/*
	 * C version of spllower(). ASTs will be checked when
	 * hypervisor_callback() exits, so no need to check here.
	 */
	iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending);
	while (iplmask != 0) {
		iplbit = 1 << (NIPL - 1);
		i = (NIPL - 1);
		while (iplmask != 0 && i > ilevel) {
			while (iplmask & iplbit) {
				ci->ci_ipending &= ~iplbit;
				ci->ci_ilevel = i;
				for (ih = ci->ci_isources[i]->ipl_handlers;
				    ih != NULL; ih = ih->ih_ipl_next) {
					KASSERT(ih->ih_cpu == ci);
					sti();
					ih_fun = (void *)ih->ih_fun;
					ih_fun(ih->ih_arg, regs);
					cli();
				}
				hypervisor_enable_ipl(i);
				/* more pending IPLs may have been registered */
				iplmask =
				    (IUNMASK(ci, ilevel) & ci->ci_ipending);
			}
			i--;
			iplbit >>= 1;
		}
	}
	ci->ci_ilevel = ilevel;
	return 0;
}

#define PRIuCPUID	"lu" /* XXX: move this somewhere more appropriate */

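/*
 * Return the event channel used to send IPIs to the given VCPU,
 * allocating one (EVTCHNOP_bind_ipi) and caching it on first use.
 * Each call takes an additional reference on the channel.
 */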
evtchn_port_t
bind_vcpu_to_evtch(cpuid_t vcpu)
{
	evtchn_op_t op;
	evtchn_port_t evtchn;

	mutex_spin_enter(&evtchn_lock);

	evtchn = vcpu_ipi_to_evtch[vcpu];
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_ipi;
		op.u.bind_ipi.vcpu = (uint32_t) vcpu;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind ipi to VCPU %" PRIuCPUID "\n", vcpu);
		evtchn = op.u.bind_ipi.port;

		vcpu_ipi_to_evtch[vcpu] = evtchn;
	}

	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

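/*
 * Bind a virtual IRQ to an event channel on the current CPU, reusing an
 * existing binding when one is recorded.  Returns the event channel, or
 * -1 when VIRQ_DEBUG is requested on a secondary CPU.
 */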
int
bind_virq_to_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn;

	mutex_spin_enter(&evtchn_lock);

	/*
	 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
	 * Please re-visit this implementation when others are used.
	 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
	 * XXX: event->virq/ipi can be unified in a linked-list
	 * implementation.
	 */
	struct cpu_info *ci = curcpu();

	if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) {
		mutex_spin_exit(&evtchn_lock);
		return -1;
	}

	if (virq == VIRQ_TIMER) {
		evtchn = virq_timer_to_evtch[ci->ci_cpuid];
	} else {
		evtchn = virq_to_evtch[virq];
	}

	/* Allocate a channel if there is none already allocated */
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = ci->ci_cpuid;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind virtual IRQ %d\n", virq);
		evtchn = op.u.bind_virq.port;
	}

	/* Set event channel */
	if (virq == VIRQ_TIMER) {
		virq_timer_to_evtch[ci->ci_cpuid] = evtchn;
	} else {
		virq_to_evtch[virq] = evtchn;
	}

	/* Increase ref counter */
	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

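/*
 * Drop one reference to the event channel bound to the given VIRQ and
 * close the channel when the last reference goes away.  Returns the
 * channel, or -1 if the VIRQ was not bound.
 */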
int
unbind_virq_from_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn;

	struct cpu_info *ci = curcpu();

	if (virq == VIRQ_TIMER) {
		evtchn = virq_timer_to_evtch[ci->ci_cpuid];
	} else {
		evtchn = virq_to_evtch[virq];
	}

	if (evtchn == -1) {
		return -1;
	}

	mutex_spin_enter(&evtchn_lock);

	evtch_bindcount[evtchn]--;
	if (evtch_bindcount[evtchn] == 0) {
		op.cmd = EVTCHNOP_close;
		op.u.close.port = evtchn;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to unbind virtual IRQ %d\n", virq);

		if (virq == VIRQ_TIMER) {
			virq_timer_to_evtch[ci->ci_cpuid] = -1;
		} else {
			virq_to_evtch[virq] = -1;
		}
	}

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

#if NPCI > 0 || NISA > 0
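/*
 * Bind a physical IRQ to an event channel (EVTCHNOP_bind_pirq), reusing
 * an existing binding when possible, and take a reference on it.
 */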
int
bind_pirq_to_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn;

	if (pirq >= NR_PIRQS) {
		panic("pirq %d out of bounds, increase NR_PIRQS", pirq);
	}

	mutex_spin_enter(&evtchn_lock);

	evtchn = pirq_to_evtch[pirq];
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_pirq;
		op.u.bind_pirq.pirq = pirq;
		op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind physical IRQ %d\n", pirq);
		evtchn = op.u.bind_pirq.port;

#ifdef IRQ_DEBUG
		printf("pirq %d evtchn %d\n", pirq, evtchn);
#endif
		pirq_to_evtch[pirq] = evtchn;
	}

	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

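/*
 * Drop one reference to the event channel bound to the given PIRQ and
 * close the channel when the last reference goes away.
 */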
int
unbind_pirq_from_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn = pirq_to_evtch[pirq];

	mutex_spin_enter(&evtchn_lock);

	evtch_bindcount[evtchn]--;
	if (evtch_bindcount[evtchn] == 0) {
		op.cmd = EVTCHNOP_close;
		op.u.close.port = evtchn;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to unbind physical IRQ %d\n", pirq);

		pirq_to_evtch[pirq] = -1;
	}

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}

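/*
 * Establish an interrupt handler for a physical IRQ: allocate the glue
 * structure, register pirq_interrupt() via event_set_handler(), record
 * whether the PIRQ needs an unmask notification and enable the channel.
 */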
struct pintrhand *
pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level,
    const char *evname)
{
	struct pintrhand *ih;
	physdev_op_t physdev_op;

	ih = kmem_zalloc(sizeof(struct pintrhand),
	    cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL) {
		printf("pirq_establish: can't allocate handler info\n");
		return NULL;
	}

	ih->pirq = pirq;
	ih->evtch = evtch;
	ih->func = func;
	ih->arg = arg;

	if (event_set_handler(evtch, pirq_interrupt, ih, level, evname) != 0) {
		kmem_free(ih, sizeof(struct pintrhand));
		return NULL;
	}

	physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
	physdev_op.u.irq_status_query.irq = pirq;
	if (HYPERVISOR_physdev_op(&physdev_op) < 0)
		panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)");
	if (physdev_op.u.irq_status_query.flags &
	    PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) {
		pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f));
#ifdef IRQ_DEBUG
		printf("pirq %d needs notify\n", pirq);
#endif
	}
	hypervisor_enable_event(evtch);
	return ih;
}

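/* Remove a handler installed by pirq_establish() and free its glue. */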
void
pirq_disestablish(struct pintrhand *ih)
{
	int error = event_remove_handler(ih->evtch, pirq_interrupt, ih);
	if (error) {
		printf("pirq_disestablish(%p): %d\n", ih, error);
		return;
	}
	kmem_free(ih, sizeof(struct pintrhand));
}

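/* Glue handler: forward the event to the real PIRQ handler. */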
int
pirq_interrupt(void *arg)
{
	struct pintrhand *ih = arg;
	int ret;

	ret = ih->func(ih->arg);
#ifdef IRQ_DEBUG
	if (ih->evtch == IRQ_DEBUG)
		printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret);
#endif
	return ret;
}

#endif /* NPCI > 0 || NISA > 0 */


/*
 * Recalculate the interrupt from scratch for an event source.
 */
static void
intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
{
	struct intrhand *ih;
	int cpu_receive = 0;

#ifdef MULTIPROCESSOR
	KASSERT(!mutex_owned(&evtlock[evtch]));
#endif
	mutex_spin_enter(&evtlock[evtch]);
	evts->ev_maxlevel = IPL_NONE;
	evts->ev_imask = 0;
	for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
		if (ih->ih_level > evts->ev_maxlevel)
			evts->ev_maxlevel = ih->ih_level;
		evts->ev_imask |= (1 << ih->ih_level);
		if (ih->ih_cpu == ci)
			cpu_receive = 1;
	}
	if (cpu_receive)
		xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
	else
		xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
	mutex_spin_exit(&evtlock[evtch]);
}

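/*
 * Register an interrupt handler on an event channel.  The handler is
 * added both to the per-CPU list for its IPL (used by the spllower()
 * path) and to the channel's handler list, which is kept sorted by
 * decreasing IPL; the evtsource and its event counter are created on
 * first use.  On MULTIPROCESSOR kernels, handlers at IPL_VM are run
 * with the kernel lock held via intr_biglock_wrapper().
 */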
int
event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
    const char *evname)
{
	struct cpu_info *ci = curcpu();	/* XXX: pass in ci ? */
	struct evtsource *evts;
	struct intrhand *ih, **ihp;
	int s;
#ifdef MULTIPROCESSOR
	bool mpsafe = (level != IPL_VM);
#endif /* MULTIPROCESSOR */

#ifdef IRQ_DEBUG
	printf("event_set_handler IRQ %d handler %p\n", evtch, func);
#endif

#ifdef DIAGNOSTIC
	if (evtch >= NR_EVENT_CHANNELS) {
		printf("evtch number %d >= NR_EVENT_CHANNELS\n", evtch);
		panic("event_set_handler");
	}
#endif

#if 0
	printf("event_set_handler evtch %d handler %p level %d\n", evtch,
	    func, level);
#endif
	ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP);
	if (ih == NULL)
		panic("can't allocate fixed interrupt source");

	ih->ih_level = level;
	ih->ih_fun = ih->ih_realfun = func;
	ih->ih_arg = ih->ih_realarg = arg;
	ih->ih_evt_next = NULL;
	ih->ih_ipl_next = NULL;
	ih->ih_cpu = ci;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	s = splhigh();

	/* register per-cpu handler for spllower() */
	event_set_iplhandler(ci, ih, level);

	/* register handler for event channel */
	if (evtsource[evtch] == NULL) {
		evts = kmem_zalloc(sizeof (struct evtsource),
		    KM_NOSLEEP);
		if (evts == NULL)
			panic("can't allocate fixed interrupt source");

		evts->ev_handlers = ih;
		/*
		 * XXX: We're assuming here that ci is the same cpu as
		 * the one on which this event/port is bound on. The
		 * api needs to be reshuffled so that this assumption
		 * is more explicitly implemented.
		 */
		evts->ev_cpu = ci;
		mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH);
		evtsource[evtch] = evts;
		if (evname)
			strncpy(evts->ev_evname, evname,
			    sizeof(evts->ev_evname));
		else
			snprintf(evts->ev_evname, sizeof(evts->ev_evname),
			    "evt%d", evtch);
		evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(ci->ci_dev), evts->ev_evname);
	} else {
		evts = evtsource[evtch];
		/* sort by IPL order, higher first */
		mutex_spin_enter(&evtlock[evtch]);
		for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) {
			if ((*ihp)->ih_level < ih->ih_level) {
				/* insert before *ihp */
				ih->ih_evt_next = *ihp;
				*ihp = ih;
				break;
			}
			if ((*ihp)->ih_evt_next == NULL) {
				(*ihp)->ih_evt_next = ih;
				break;
			}
		}
		mutex_spin_exit(&evtlock[evtch]);
	}

	intr_calculatemasks(evts, evtch, ci);
	splx(s);

	return 0;
}

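/*
 * Add a handler to the per-CPU list for its IPL, creating the iplsource
 * (with its recurse/resume stubs) on first use.
 */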
void
event_set_iplhandler(struct cpu_info *ci,
    struct intrhand *ih,
    int level)
{
	struct iplsource *ipls;

	KASSERT(ci == ih->ih_cpu);
	if (ci->ci_isources[level] == NULL) {
		ipls = kmem_zalloc(sizeof (struct iplsource),
		    KM_NOSLEEP);
		if (ipls == NULL)
			panic("can't allocate fixed interrupt source");
		ipls->ipl_recurse = xenev_stubs[level].ist_recurse;
		ipls->ipl_resume = xenev_stubs[level].ist_resume;
		ipls->ipl_handlers = ih;
		ci->ci_isources[level] = ipls;
	} else {
		ipls = ci->ci_isources[level];
		ih->ih_ipl_next = ipls->ipl_handlers;
		ipls->ipl_handlers = ih;
	}
}

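/*
 * Undo event_set_handler(): unlink the handler from both the channel's
 * and the per-CPU IPL lists, and tear down the evtsource once its last
 * handler is gone.
 */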
int
event_remove_handler(int evtch, int (*func)(void *), void *arg)
{
	struct iplsource *ipls;
	struct evtsource *evts;
	struct intrhand *ih;
	struct intrhand **ihp;
	struct cpu_info *ci;

	evts = evtsource[evtch];
	if (evts == NULL)
		return ENOENT;

	mutex_spin_enter(&evtlock[evtch]);
	for (ihp = &evts->ev_handlers, ih = evts->ev_handlers;
	    ih != NULL;
	    ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL) {
		mutex_spin_exit(&evtlock[evtch]);
		return ENOENT;
	}
	ci = ih->ih_cpu;
	*ihp = ih->ih_evt_next;

	ipls = ci->ci_isources[ih->ih_level];
	for (ihp = &ipls->ipl_handlers, ih = ipls->ipl_handlers;
	    ih != NULL;
	    ihp = &ih->ih_ipl_next, ih = ih->ih_ipl_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL)
		panic("event_remove_handler");
	*ihp = ih->ih_ipl_next;
	mutex_spin_exit(&evtlock[evtch]);
	kmem_free(ih, sizeof (struct intrhand));
	if (evts->ev_handlers == NULL) {
		xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
		evcnt_detach(&evts->ev_evcnt);
		kmem_free(evts, sizeof (struct evtsource));
		evtsource[evtch] = NULL;
	} else {
		intr_calculatemasks(evts, evtch, ci);
	}
	return 0;
}

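/*
 * Unmask an event channel and, for PIRQs that require it, tell the
 * hypervisor to unmask the physical interrupt as well.
 */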
void
hypervisor_enable_event(unsigned int evtch)
{
#ifdef IRQ_DEBUG
	if (evtch == IRQ_DEBUG)
		printf("hypervisor_enable_evtch: evtch %d\n", evtch);
#endif

	hypervisor_unmask_event(evtch);
#if NPCI > 0 || NISA > 0
	if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) {
#ifdef IRQ_DEBUG
		if (evtch == IRQ_DEBUG)
			printf("pirq_notify(%d)\n", evtch);
#endif
		(void)HYPERVISOR_physdev_op(&physdev_op_notify);
	}
#endif /* NPCI > 0 || NISA > 0 */
}

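/*
 * VIRQ_DEBUG handler: dump the local CPU's interrupt state and the
 * shared-info event-channel mask/pending bitmaps.
 */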
int
xen_debug_handler(void *arg)
{
	struct cpu_info *ci = curcpu();
	int i;
	int xci_ilevel = ci->ci_ilevel;
	int xci_ipending = ci->ci_ipending;
	int xci_idepth = ci->ci_idepth;
	u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
	u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
	u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel;
	unsigned long evtchn_mask[sizeof(unsigned long) * 8];
	unsigned long evtchn_pending[sizeof(unsigned long) * 8];

	u_long p;

	p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0];
	memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask));
	p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0];
	memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending));

	__insn_barrier();
	printf("debug event\n");
	printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n",
	    xci_ilevel, xci_ipending, xci_idepth);
	printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
	    " evtchn_pending_sel 0x%lx\n",
	    upcall_pending, upcall_mask, pending_sel);
	printf("evtchn_mask");
	for (i = 0; i <= LONG_MASK; i++)
		printf(" %lx", (u_long)evtchn_mask[i]);
	printf("\n");
	printf("evtchn_pending");
	for (i = 0; i <= LONG_MASK; i++)
		printf(" %lx", (u_long)evtchn_pending[i]);
	printf("\n");
	return 0;
}