/*
 * Mapping between device interrupt and the allocated vector. Indexed
 * by major number.
 */
apix_dev_vector_t **apix_dev_vector;
/*
 * Mapping between device major number and CPU id. It is used when the
 * round-robin-with-affinity interrupt binding policy is applied. Under
 * that policy, devices with the same major number are bound to the same
 * CPU; an illustrative sketch follows the declarations below.
 */
processorid_t *apix_major_to_cpu;	/* major to cpu mapping */
kmutex_t apix_mutex;	/* for apix_dev_vector & apix_major_to_cpu */
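
/*
 * Illustrative sketch only, not part of the driver: under the
 * round-robin-with-affinity policy a binding routine could consult
 * apix_major_to_cpu while holding apix_mutex so that every interrupt
 * owned by one major number lands on the same CPU.  The helper
 * apix_pick_next_cpu() and the -1 "no CPU chosen yet" sentinel are
 * assumptions made for this example; the real allocator may differ.
 */
#if 0	/* example only, not compiled */
static processorid_t
apix_cpu_for_major(major_t major)
{
	processorid_t cpu;

	mutex_enter(&apix_mutex);
	cpu = apix_major_to_cpu[major];
	if (cpu == -1) {
		/* first interrupt for this major: pick a CPU round-robin */
		cpu = apix_pick_next_cpu();	/* hypothetical helper */
		apix_major_to_cpu[major] = cpu;
	}
	mutex_exit(&apix_mutex);

	return (cpu);
}
#endif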

int apix_nipis = 16;	/* Maximum number of IPIs */
/*
 * Maximum number of vectors in a CPU that can be used for interrupt
 * allocation (including IPIs and the reserved vectors).
 */
int apix_cpu_nvectors = APIX_NVECTOR;

/* number of CPUs in power-on transition state */
static int apic_poweron_cnt = 0;

/* gcpu.h */

extern void apic_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp);
extern void apic_change_eoi();

/*
 * This is the loadable module wrapper
 */

int
_init(void)
{
	if (apic_coarse_hrtime)
		apix_ops.psm_gethrtime = &apic_gettime;
	return (psm_mod_init(&apix_hdlp, &apix_psm_info));
}

int
_fini(void)
{
	return (psm_mod_fini(&apix_hdlp, &apix_psm_info));
}

/*
 * Return the apix_vector_t for this device's translated IRQ if a vector
 * mapping has been set up. Return NULL if no IRQ setup exists or no
 * vector has been allocated to the IRQ.
 */
static apix_vector_t *
apix_intx_xlate_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	int irqno;
	apix_vector_t *vecp;

	/* get the IRQ number */
	if ((irqno = apix_intx_xlate_irq(dip, inum, ispec)) == -1)
		return (NULL);

	/* get the vector, if one has been allocated to this irqno */
	vecp = apix_intx_get_vector(irqno);

	return (vecp);
}
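
/*
 * Illustrative caller sketch, not part of the driver: a fixed-interrupt
 * query path could translate the (dip, inum) pair and read the allocated
 * vector, treating a NULL return as "no vector allocated".  The v_vector
 * field name is assumed here for the example.
 */
#if 0	/* example only, not compiled */
static int
apix_intx_query_vector(dev_info_t *dip, int inum, struct intrspec *ispec)
{
	apix_vector_t *vecp;

	vecp = apix_intx_xlate_vector(dip, inum, ispec);
	if (vecp == NULL)
		return (0);	/* no IRQ setup or no vector allocated */

	return (vecp->v_vector);
}
#endif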

/*
 * Switch between the safe and x2APIC IPI sending methods.
 * The CPU may power on in xAPIC mode or x2APIC mode. If the CPU needs to send
 * an IPI to other CPUs before entering x2APIC mode, it still needs to use the
 * xAPIC method. Before sending a StartIPI to the target CPU, psm_send_ipi will
 * be changed to apic_common_send_ipi, which detects the current local APIC
 * mode and uses the right method to send an IPI. If some CPUs fail to start
 * up, apic_poweron_cnt won't return to zero, so apic_common_send_ipi will
 * always be used. psm_send_ipi can't simply be changed back to
 * x2apic_send_ipi if some CPUs failed to start up, because those CPUs may
 * recover by themselves later at an unpredictable time.
 */
void
apic_switch_ipi_callback(boolean_t enter)
{
	ulong_t iflag;
	struct psm_ops *pops = psmops;

	iflag = intr_clear();
	lock_set(&apic_mode_switch_lock);
	if (enter) {
		ASSERT(apic_poweron_cnt >= 0);
		if (apic_poweron_cnt == 0) {
			pops->psm_send_ipi = apic_common_send_ipi;
			send_dirintf = pops->psm_send_ipi;
		}
		apic_poweron_cnt++;
	} else {
		ASSERT(apic_poweron_cnt > 0);
		apic_poweron_cnt--;
		if (apic_poweron_cnt == 0) {
			pops->psm_send_ipi = x2apic_send_ipi;
			send_dirintf = pops->psm_send_ipi;
		}
	}
	lock_clear(&apic_mode_switch_lock);
	intr_restore(iflag);
}
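
/*
 * Illustrative pairing sketch, not part of the driver: per the comment
 * above, a CPU power-on path would switch to the safe IPI method before
 * sending the Startup IPI and switch back only once the CPU is online,
 * leaving apic_poweron_cnt raised if the CPU never comes up.
 * start_other_cpu() is a hypothetical stand-in for the real startup call.
 */
#if 0	/* example only, not compiled */
static int
apix_poweron_cpu(processorid_t cpuid)
{
	int err;

	apic_switch_ipi_callback(B_TRUE);	/* force the safe IPI method */
	err = start_other_cpu(cpuid);		/* hypothetical startup call */
	if (err == 0)
		apic_switch_ipi_callback(B_FALSE);	/* back to x2APIC IPIs */

	return (err);
}
#endif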

/* stub function */
int
apix_loaded(void)
{
	return (apix_is_enabled);
}