83 #include <sys/reboot.h>
84 #include <sys/hpet.h>
85 #include <sys/apic_common.h>
86 #include <sys/apic_timer.h>
87
88 /*
89 * Local Function Prototypes
90 */
91 static void apic_init_intr(void);
92
93 /*
94 * standard MP entries
95 */
96 static int apic_probe(void);
97 static int apic_getclkirq(int ipl);
98 static void apic_init(void);
99 static void apic_picinit(void);
100 static int apic_post_cpu_start(void);
101 static int apic_intr_enter(int ipl, int *vect);
102 static void apic_setspl(int ipl);
103 static void x2apic_setspl(int ipl);
104 static int apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
105 static int apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
106 static int apic_disable_intr(processorid_t cpun);
107 static void apic_enable_intr(processorid_t cpun);
108 static int apic_get_ipivect(int ipl, int type);
109 static void apic_post_cyclic_setup(void *arg);
110
111 /*
112 * The following vector assignments influence the value of ipltopri and
113 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
114 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
115 * we care to do so in future. Note some IPLs which are rarely used
116 * will share the vector ranges and heavily used IPLs (5 and 6) have
117 * a wide range.
118 *
119 * This array is used to initialize apic_ipls[] (in apic_init()).
120 *
121 * IPL Vector range. as passed to intr_enter
122 * 0 none.
123 * 1,2,3 0x20-0x2f 0x0-0xf
295 for (; j < MAXIPL + 1; j++)
296 /* fill up any empty ipltopri slots */
297 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
298 apic_init_common();
299
300 #if !defined(__amd64)
301 if (cpuid_have_cr8access(CPU))
302 apic_have_32bit_cr8 = 1;
303 #endif
304 }
305
306 static void
307 apic_init_intr(void)
308 {
309 processorid_t cpun = psm_get_cpu_id();
310 uint_t nlvt;
311 uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
312
313 apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
314
315 if (apic_mode == LOCAL_APIC) {
316 /*
317 * We are running APIC in MMIO mode.
318 */
319 if (apic_flat_model) {
320 apic_reg_ops->apic_write(APIC_FORMAT_REG,
321 APIC_FLAT_MODEL);
322 } else {
323 apic_reg_ops->apic_write(APIC_FORMAT_REG,
324 APIC_CLUSTER_MODEL);
325 }
326
327 apic_reg_ops->apic_write(APIC_DEST_REG,
328 AV_HIGH_ORDER >> cpun);
329 }
330
331 if (apic_directed_EOI_supported()) {
332 /*
333 * Setting the 12th bit in the Spurious Interrupt Vector
334 * Register suppresses broadcast EOIs generated by the local
335 * APIC. The suppression of broadcast EOIs happens only when
336 * interrupts are level-triggered.
337 */
338 svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
339 }
340
341 /* need to enable APIC before unmasking NMI */
342 apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);
343
344 /*
345 * Presence of an invalid vector with delivery mode AV_FIXED can
346 * cause an error interrupt, even if the entry is masked...so
347 * write a valid vector to LVT entries along with the mask bit
348 */
349
616 * cache usage. So, we leave it as is.
617 */
618 if (!apic_level_intr[irq]) {
619 apic_reg_ops->apic_send_eoi(0);
620 }
621
622 #ifdef DEBUG
623 APIC_DEBUG_BUF_PUT(vector);
624 APIC_DEBUG_BUF_PUT(irq);
625 APIC_DEBUG_BUF_PUT(nipl);
626 APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
627 if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
628 drv_usecwait(apic_stretch_interrupts);
629
630 if (apic_break_on_cpu == psm_get_cpu_id())
631 apic_break();
632 #endif /* DEBUG */
633 return (nipl);
634 }
635
/*
 * Common interrupt-exit epilogue shared by the MMIO local APIC and
 * X2APIC flavors of intr_exit.  The expansion site must have
 * `cpu_infop', `irq' and `prev_ipl' in scope.  It issues an EOI for
 * level-triggered sources, records the restored IPL, and clears the
 * in-progress bits for every IPL above prev_ipl — (2 << prev_ipl) - 1
 * is a mask keeping only bits 0..prev_ipl.
 */
#define	APIC_INTR_EXIT() \
{ \
	cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
	if (apic_level_intr[irq]) \
		apic_reg_ops->apic_send_eoi(irq); \
	cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
	/* ISR above current pri could not be in progress */ \
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
}
649
650 /*
651 * Any changes made to this function must also change X2APIC
652 * version of intr_exit.
653 */
654 void
655 apic_intr_exit(int prev_ipl, int irq)
656 {
657 apic_cpus_info_t *cpu_infop;
658
659 apic_reg_ops->apic_write_task_reg(apic_ipltopri[prev_ipl]);
660
661 APIC_INTR_EXIT();
662 }
663
664 /*
665 * Same as apic_intr_exit() except it uses MSR rather than MMIO
666 * to access local apic registers.
667 */
668 void
669 x2apic_intr_exit(int prev_ipl, int irq)
670 {
671 apic_cpus_info_t *cpu_infop;
672
673 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
674 APIC_INTR_EXIT();
675 }
676
677 intr_exit_fn_t
678 psm_intr_exit_fn(void)
679 {
680 if (apic_mode == LOCAL_X2APIC)
681 return (x2apic_intr_exit);
682
683 return (apic_intr_exit);
684 }
685
686 /*
687 * Mask all interrupts below or equal to the given IPL.
688 * Any changes made to this function must also change X2APIC
689 * version of setspl.
690 */
static void
apic_setspl(int ipl)
{
	/* Raise the TPR so vectors at or below `ipl' are held off. */
	apic_reg_ops->apic_write_task_reg(apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
	/*
	 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
	 * have enough time to come in before the priority is raised again
	 * during the idle() loop.  The dummy priority-register read
	 * presumably serves as the needed delay / write flush —
	 * NOTE(review): historical workaround, exact mechanism unverified.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}
706
707 /*
708 * X2APIC version of setspl.
709 * Mask all interrupts below or equal to the given IPL
710 */
static void
x2apic_setspl(int ipl)
{
	/* Same as apic_setspl(), but the TPR is written via MSR. */
	X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
}
719
/*
 * Record a new IPL binding for `irqno'; the shared PSM code does all
 * of the real work.
 */
/*ARGSUSED*/
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int rc;

	rc = apic_addspl_common(irqno, ipl, min_ipl, max_ipl);
	return (rc);
}
726
/*
 * Remove an IPL binding for `irqno'; the shared PSM code does all of
 * the real work.
 */
static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int rc;

	rc = apic_delspl_common(irqno, ipl, min_ipl, max_ipl);
	return (rc);
}
732
/*
 * Per-CPU initialization run on each AP after it has started: may
 * switch the AP into x2apic mode, programs its local APIC, enables
 * its caches, and marks it online.  Returns PSM_SUCCESS.
 */
static int
apic_post_cpu_start(void)
{
	int cpun;
	static int cpus_started = 1;	/* BSP counts as the first CPU */

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	/*
	 * On BSP we would have enabled X2APIC, if supported by processor,
	 * in acpi_probe(), but on AP we do it here.
	 *
	 * We enable X2APIC mode only if BSP is running in X2APIC & the
	 * local APIC mode of the current CPU is MMIO (xAPIC).
	 */
	if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
	    apic_local_mode() == LOCAL_APIC) {
		apic_enable_x2apic();
	}

	/*
	 * Switch back to x2apic IPI sending method for performance when target
	 * CPU has entered x2apic mode.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_FALSE);
	}

	splx(ipltospl(LOCK_LEVEL));
	apic_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * CPUs, so enable it here (clear cache-disable and no-writethrough).
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

#ifdef DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif /* DEBUG */

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	/* Program this CPU's APIC timer divide configuration. */
	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
	return (PSM_SUCCESS);
}
789
790 /*
791 * type == -1 indicates it is an internal request. Do not change
792 * resv_vector for these requests
793 */
794 static int
795 apic_get_ipivect(int ipl, int type)
796 {
1313 }
1314
1315 return (rv);
1316 }
1317
1318
1319 uchar_t
1320 apic_modify_vector(uchar_t vector, int irq)
1321 {
1322 apic_vector_to_irq[vector] = (uchar_t)irq;
1323 return (vector);
1324 }
1325
/*
 * Return the module's identity string; doubles as the APIC type name.
 */
char *
apic_get_apic_type(void)
{
	return (apic_psm_info.p_mach_idstring);
}
1331
/*
 * Switch the PSM entry points over to their x2apic (MSR-based)
 * flavors and record the new mode.
 */
void
x2apic_update_psm(void)
{
	struct psm_ops *pops = &apic_ops;

	ASSERT(pops != NULL);

	/* Repoint interrupt exit and priority handling at x2apic versions. */
	pops->psm_intr_exit = x2apic_intr_exit;
	pops->psm_setspl = x2apic_setspl;

	/* IPI send goes via MSR too; keep the global alias in sync. */
	pops->psm_send_ipi = x2apic_send_ipi;
	send_dirintf = pops->psm_send_ipi;

	/* Record the mode change, then let common code react to it. */
	apic_mode = LOCAL_X2APIC;
	apic_change_ops();
}
|
83 #include <sys/reboot.h>
84 #include <sys/hpet.h>
85 #include <sys/apic_common.h>
86 #include <sys/apic_timer.h>
87
88 /*
89 * Local Function Prototypes
90 */
91 static void apic_init_intr(void);
92
93 /*
94 * standard MP entries
95 */
96 static int apic_probe(void);
97 static int apic_getclkirq(int ipl);
98 static void apic_init(void);
99 static void apic_picinit(void);
100 static int apic_post_cpu_start(void);
101 static int apic_intr_enter(int ipl, int *vect);
102 static void apic_setspl(int ipl);
103 static int apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
104 static int apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
105 static int apic_disable_intr(processorid_t cpun);
106 static void apic_enable_intr(processorid_t cpun);
107 static int apic_get_ipivect(int ipl, int type);
108 static void apic_post_cyclic_setup(void *arg);
109
110 /*
111 * The following vector assignments influence the value of ipltopri and
112 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
113 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
114 * we care to do so in future. Note some IPLs which are rarely used
115 * will share the vector ranges and heavily used IPLs (5 and 6) have
116 * a wide range.
117 *
118 * This array is used to initialize apic_ipls[] (in apic_init()).
119 *
120 * IPL Vector range. as passed to intr_enter
121 * 0 none.
122 * 1,2,3 0x20-0x2f 0x0-0xf
294 for (; j < MAXIPL + 1; j++)
295 /* fill up any empty ipltopri slots */
296 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
297 apic_init_common();
298
299 #if !defined(__amd64)
300 if (cpuid_have_cr8access(CPU))
301 apic_have_32bit_cr8 = 1;
302 #endif
303 }
304
305 static void
306 apic_init_intr(void)
307 {
308 processorid_t cpun = psm_get_cpu_id();
309 uint_t nlvt;
310 uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
311
312 apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
313
314 ASSERT(apic_mode == LOCAL_APIC);
315
316 /*
317 * We are running APIC in MMIO mode.
318 */
319 if (apic_flat_model) {
320 apic_reg_ops->apic_write(APIC_FORMAT_REG, APIC_FLAT_MODEL);
321 } else {
322 apic_reg_ops->apic_write(APIC_FORMAT_REG, APIC_CLUSTER_MODEL);
323 }
324
325 apic_reg_ops->apic_write(APIC_DEST_REG, AV_HIGH_ORDER >> cpun);
326
327 if (apic_directed_EOI_supported()) {
328 /*
329 * Setting the 12th bit in the Spurious Interrupt Vector
330 * Register suppresses broadcast EOIs generated by the local
331 * APIC. The suppression of broadcast EOIs happens only when
332 * interrupts are level-triggered.
333 */
334 svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
335 }
336
337 /* need to enable APIC before unmasking NMI */
338 apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);
339
340 /*
341 * Presence of an invalid vector with delivery mode AV_FIXED can
342 * cause an error interrupt, even if the entry is masked...so
343 * write a valid vector to LVT entries along with the mask bit
344 */
345
612 * cache usage. So, we leave it as is.
613 */
614 if (!apic_level_intr[irq]) {
615 apic_reg_ops->apic_send_eoi(0);
616 }
617
618 #ifdef DEBUG
619 APIC_DEBUG_BUF_PUT(vector);
620 APIC_DEBUG_BUF_PUT(irq);
621 APIC_DEBUG_BUF_PUT(nipl);
622 APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
623 if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
624 drv_usecwait(apic_stretch_interrupts);
625
626 if (apic_break_on_cpu == psm_get_cpu_id())
627 apic_break();
628 #endif /* DEBUG */
629 return (nipl);
630 }
631
/*
 * Drop back to the pre-interrupt task priority and perform the
 * end-of-interrupt bookkeeping for this CPU.
 */
void
apic_intr_exit(int prev_ipl, int irq)
{
	apic_cpus_info_t *cpu_infop;

	/* Restore the task priority that was in force before this intr. */
	apic_reg_ops->apic_write_task_reg(apic_ipltopri[prev_ipl]);

	cpu_infop = &apic_cpus[psm_get_cpu_id()];
	/* Level-triggered sources need an explicit EOI on the way out. */
	if (apic_level_intr[irq])
		apic_reg_ops->apic_send_eoi(irq);
	cpu_infop->aci_curipl = (uchar_t)prev_ipl;
	/* ISR above current pri could not be in progress */
	cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1;
}
646
/*
 * Return the interrupt-exit routine; this module always uses the
 * MMIO local APIC exit path.
 */
intr_exit_fn_t
psm_intr_exit_fn(void)
{
	return (apic_intr_exit);
}
652
653 /*
654 * Mask all interrupts below or equal to the given IPL.
655 */
static void
apic_setspl(int ipl)
{
	/* Raise the TPR so vectors at or below `ipl' are held off. */
	apic_reg_ops->apic_write_task_reg(apic_ipltopri[ipl]);

	/* interrupts at ipl above this cannot be in progress */
	apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
	/*
	 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
	 * have enough time to come in before the priority is raised again
	 * during the idle() loop.  The dummy priority-register read
	 * presumably serves as the needed delay / write flush —
	 * NOTE(review): historical workaround, exact mechanism unverified.
	 */
	if (apic_setspl_delay)
		(void) apic_reg_ops->apic_get_pri();
}
671
/*
 * Record a new IPL binding for `irqno'; the shared PSM code does all
 * of the real work.
 */
/*ARGSUSED*/
static int
apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int rc;

	rc = apic_addspl_common(irqno, ipl, min_ipl, max_ipl);
	return (rc);
}
678
/*
 * Remove an IPL binding for `irqno'; the shared PSM code does all of
 * the real work.
 */
static int
apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
{
	int rc;

	rc = apic_delspl_common(irqno, ipl, min_ipl, max_ipl);
	return (rc);
}
684
/*
 * Per-CPU initialization run on each AP after it has started:
 * programs its local APIC, enables its caches, and marks it online.
 * Returns PSM_SUCCESS.
 */
static int
apic_post_cpu_start(void)
{
	int cpun;
	static int cpus_started = 1;	/* BSP counts as the first CPU */

	/* We know this CPU + BSP started successfully. */
	cpus_started++;

	splx(ipltospl(LOCK_LEVEL));
	apic_init_intr();

	/*
	 * Some systems don't enable the internal cache on the non-boot
	 * CPUs, so enable it here (clear cache-disable and no-writethrough).
	 */
	setcr0(getcr0() & ~(CR0_CD | CR0_NW));

	APIC_AV_PENDING_SET();

	/*
	 * We may be booting, or resuming from suspend; aci_status will
	 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
	 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
	 */
	cpun = psm_get_cpu_id();
	apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;

	/* Program this CPU's APIC timer divide configuration. */
	apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
	return (PSM_SUCCESS);
}
716
717 /*
718 * type == -1 indicates it is an internal request. Do not change
719 * resv_vector for these requests
720 */
721 static int
722 apic_get_ipivect(int ipl, int type)
723 {
1240 }
1241
1242 return (rv);
1243 }
1244
1245
1246 uchar_t
1247 apic_modify_vector(uchar_t vector, int irq)
1248 {
1249 apic_vector_to_irq[vector] = (uchar_t)irq;
1250 return (vector);
1251 }
1252
/*
 * Return the module's identity string; doubles as the APIC type name.
 */
char *
apic_get_apic_type(void)
{
	return (apic_psm_info.p_mach_idstring);
}
1258
/*
 * This module never switches IPI delivery methods: callers may only
 * "enter" the single supported mode, so anything else is a bug.
 */
void
apic_switch_ipi_callback(boolean_t enter)
{
	ASSERT(enter == B_TRUE);
}
1264
/*
 * x2APIC is not supported by this module; always report "not
 * detected" so callers stay on the xAPIC (MMIO) paths.
 */
int
apic_detect_x2apic(void)
{
	return (0);
}
1270
/*
 * Must never be reached here (apic_detect_x2apic() returns 0);
 * panic to catch any caller that tries anyway.
 */
void
apic_enable_x2apic(void)
{
	cmn_err(CE_PANIC, "apic_enable_x2apic() called in pcplusmp");
}
1276
/*
 * Must never be reached here — this module has no x2apic entry
 * points to switch to; panic to catch any caller that tries.
 */
void
x2apic_update_psm(void)
{
	cmn_err(CE_PANIC, "x2apic_update_psm() called in pcplusmp");
}
|