Print this page
8620 pcplusmp shouldn't support x2APIC mode
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/io/pcplusmp/apic_common.c
+++ new/usr/src/uts/i86pc/io/pcplusmp/apic_common.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 - * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 + * Copyright (c) 2017, Joyent, Inc. All rights reserved.
27 27 * Copyright (c) 2016 by Delphix. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
32 32 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
33 33 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
34 34 * PSMI 1.5 extensions are supported in Solaris Nevada.
35 35 * PSMI 1.6 extensions are supported in Solaris Nevada.
36 36 * PSMI 1.7 extensions are supported in Solaris Nevada.
37 37 */
38 38 #define PSMI_1_7
39 39
40 40 #include <sys/processor.h>
41 41 #include <sys/time.h>
42 42 #include <sys/psm.h>
43 43 #include <sys/smp_impldefs.h>
44 44 #include <sys/cram.h>
45 45 #include <sys/acpi/acpi.h>
46 46 #include <sys/acpica.h>
47 47 #include <sys/psm_common.h>
48 48 #include <sys/apic.h>
49 49 #include <sys/pit.h>
50 50 #include <sys/ddi.h>
51 51 #include <sys/sunddi.h>
52 52 #include <sys/ddi_impldefs.h>
53 53 #include <sys/pci.h>
54 54 #include <sys/promif.h>
55 55 #include <sys/x86_archext.h>
56 56 #include <sys/cpc_impl.h>
57 57 #include <sys/uadmin.h>
58 58 #include <sys/panic.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/archsystm.h>
61 61 #include <sys/trap.h>
62 62 #include <sys/machsystm.h>
63 63 #include <sys/sysmacros.h>
64 64 #include <sys/cpuvar.h>
65 65 #include <sys/rm_platter.h>
66 66 #include <sys/privregs.h>
67 67 #include <sys/note.h>
68 68 #include <sys/pci_intr_lib.h>
69 69 #include <sys/spl.h>
70 70 #include <sys/clock.h>
71 71 #include <sys/dditypes.h>
72 72 #include <sys/sunddi.h>
73 73 #include <sys/x_call.h>
74 74 #include <sys/reboot.h>
75 75 #include <sys/hpet.h>
76 76 #include <sys/apic_common.h>
77 77 #include <sys/apic_timer.h>
78 78
static void apic_record_ioapic_rdt(void *intrmap_private,
    ioapic_rdt_t *irdt);
static void apic_record_msi(void *intrmap_private, msi_regs_t *mregs);

/*
 * Common routines between pcplusmp & apix (taken from apic.c).
 */

int	apic_clkinit(int);
hrtime_t apic_gethrtime(void);
void	apic_send_ipi(int, int);
void	apic_set_idlecpu(processorid_t);
void	apic_unset_idlecpu(processorid_t);
void	apic_shutdown(int, int);
void	apic_preshutdown(int, int);
processorid_t apic_get_next_processorid(processorid_t);

hrtime_t apic_gettime();

/* Which I/O APIC EOI strategy apix selected; see apic_ioapic_method_probe(). */
enum apic_ioapic_method_type apix_mul_ioapic_method = APIC_MUL_IOAPIC_PCPLUSMP;

/* Now the ones for Dynamic Interrupt distribution */
int	apic_enable_dynamic_migration = 0;

/* maximum loop count when sending Start IPIs. */
int	apic_sipi_max_loop_count = 0x1000;

/*
 * These variables are frequently accessed in apic_intr_enter(),
 * apic_intr_exit and apic_setspl, so group them together
 */
volatile uint32_t *apicadr = NULL;	/* virtual addr of local APIC	*/
int	apic_setspl_delay = 1;		/* apic_setspl - delay enable	*/
int	apic_clkvect;

/* vector at which error interrupts come in */
int	apic_errvect;
int	apic_enable_error_intr = 1;
int	apic_error_display_delay = 100;

/* vector at which performance counter overflow interrupts come in */
int	apic_cpcovf_vect;
int	apic_enable_cpcovf_intr = 1;

/* vector at which CMCI interrupts come in */
int	apic_cmci_vect;
extern int	cmi_enable_cmci;
extern void	cmi_cmci_trap(void);

kmutex_t cmci_cpu_setup_lock;	/* protects cmci_cpu_setup_registered */
int	cmci_cpu_setup_registered;

/* Serializes local APIC <-> x2APIC mode transitions. */
lock_t	apic_mode_switch_lock;

/*
 * Patchable global variables.
 */
int	apic_forceload = 0;

int	apic_coarse_hrtime = 1;		/* 0 - use accurate slow gethrtime() */

int	apic_flat_model = 0;		/* 0 - clustered. 1 - flat */
int	apic_panic_on_nmi = 0;
int	apic_panic_on_apic_error = 0;

int	apic_verbose = 0;	/* 0x1ff */

#ifdef DEBUG
int	apic_debug = 0;
int	apic_restrict_vector = 0;

int	apic_debug_msgbuf[APIC_DEBUG_MSGBUFSIZE];
int	apic_debug_msgbufindex = 0;

#endif /* DEBUG */

uint_t apic_nticks = 0;
uint_t apic_skipped_redistribute = 0;

uint_t last_count_read = 0;
lock_t	apic_gethrtime_lock;
volatile int	apic_hrtime_stamp = 0;
volatile hrtime_t apic_nsec_since_boot = 0;

static	hrtime_t	apic_last_hrtime = 0;
int		apic_hrtime_error = 0;
int		apic_remote_hrterr = 0;
int		apic_num_nmis = 0;
int		apic_apic_error = 0;
int		apic_num_apic_errors = 0;
int		apic_num_cksum_errors = 0;

int	apic_error = 0;

static	int	apic_cmos_ssb_set = 0;

/* use to make sure only one cpu handles the nmi */
lock_t	apic_nmi_lock;
/* use to make sure only one cpu handles the error interrupt */
lock_t	apic_error_lock;

/*
 * IPMI BMC command sequence (Aspen platform, KCS-style interface) used by
 * apic_shutdown() to arm the watchdog for a power down: SET_WATCHDOG_TIMER
 * followed by RESET_WATCHDOG_TIMER to start the countdown.
 */
static	struct {
	uchar_t	cntl;
	uchar_t	data;
} aspen_bmc[] = {
	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_NEXT,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ CC_SMS_WR_NEXT,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ CC_SMS_WR_NEXT,	0x2 },		/* DataByte 2: Power Down */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 3: no pre-timeout */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 4: timer expir. */
	{ CC_SMS_WR_NEXT,	0xa },		/* DataByte 5: init countdown */
	{ CC_SMS_WR_END,	0x0 },		/* DataByte 6: init countdown */

	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_END,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};

/* Same watchdog power-down sequence for the Sitka platform BMC. */
static	struct {
	int	port;
	uchar_t	data;
} sitka_bmc[] = {
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_DATA_REGISTER,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ SMS_DATA_REGISTER,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ SMS_DATA_REGISTER,	0x2 },		/* DataByte 2: Power Down */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 3: no pre-timeout */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 4: timer expir. */
	{ SMS_DATA_REGISTER,	0xa },		/* DataByte 5: init countdown */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 6: init countdown */

	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};

/* Patchable global variables. */
int	apic_kmdb_on_nmi = 0;		/* 0 - no, 1 - yes enter kmdb */
uint32_t apic_divide_reg_init = 0;	/* 0 - divide by 2 */

/* default apic ops without interrupt remapping */
static apic_intrmap_ops_t apic_nointrmap_ops = {
	(int (*)(int))return_instr,
	(void (*)(int))return_instr,
	(void (*)(void **, dev_info_t *, uint16_t, int, uchar_t))return_instr,
	(void (*)(void *, void *, uint16_t, int))return_instr,
	(void (*)(void **))return_instr,
	apic_record_ioapic_rdt,
	apic_record_msi,
};

apic_intrmap_ops_t *apic_vt_ops = &apic_nointrmap_ops;
apic_cpus_info_t *apic_cpus = NULL;
cpuset_t apic_cpumask;
uint_t apic_picinit_called;

/* Flag to indicate that we need to shut down all processors */
static uint_t apic_shutdown_processors;
/*
 * Probe the ioapic method for apix module. Called in apic_probe_common()
 *
 * Returns PSM_SUCCESS when apix can attach (an EOI method better than the
 * pcplusmp fallback was found, or apix is disabled); returns PSM_FAILURE
 * and clears apix_enable so that pcplusmp is installed instead.
 */
int
apic_ioapic_method_probe()
{
	if (apix_enable == 0)
		return (PSM_SUCCESS);

	/*
	 * Set IOAPIC EOI handling method. The priority from low to high is:
	 *	1. IOxAPIC: with EOI register
	 *	2. IOMMU interrupt mapping
	 *	3. Mask-Before-EOI method for systems without boot
	 *	interrupt routing, such as systems with only one IOAPIC;
	 *	NVIDIA CK8-04/MCP55 systems; systems with bridge solution
	 *	which disables the boot interrupt routing already.
	 *	4. Directed EOI
	 *
	 * Note: the checks below run lowest priority first; each later
	 * check overwrites the method chosen by an earlier one.
	 */
	if (apic_io_ver[0] >= 0x20)
		apix_mul_ioapic_method = APIC_MUL_IOAPIC_IOXAPIC;
	if ((apic_io_max == 1) || (apic_nvidia_io_max == apic_io_max))
		apix_mul_ioapic_method = APIC_MUL_IOAPIC_MASK;
	if (apic_directed_EOI_supported())
		apix_mul_ioapic_method = APIC_MUL_IOAPIC_DEOI;

	/* fall back to pcplusmp */
	if (apix_mul_ioapic_method == APIC_MUL_IOAPIC_PCPLUSMP) {
		/* make sure apix is after pcplusmp in /etc/mach */
		apix_enable = 0; /* go ahead with pcplusmp install next */
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}
278 276
/*
 * handler for APIC Error interrupt. Just print a warning and continue
 *
 * Returns DDI_INTR_CLAIMED if the Error Status Register showed any error
 * bits set, DDI_INTR_UNCLAIMED otherwise.
 */
int
apic_error_intr()
{
	uint_t	error0, error1, error;
	uint_t	i;

	/*
	 * We need to write before read as per 7.4.17 of system prog manual.
	 * We do both and or the results to be safe
	 */
	error0 = apic_reg_ops->apic_read(APIC_ERROR_STATUS);
	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	error1 = apic_reg_ops->apic_read(APIC_ERROR_STATUS);
	error = error0 | error1;

	/*
	 * Clear the APIC error status (do this on all cpus that enter here)
	 * (two writes are required due to the semantics of accessing the
	 * error status register.)
	 */
	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);

	/*
	 * Prevent more than 1 CPU from handling error interrupt causing
	 * double printing (interleave of characters from multiple
	 * CPU's when using prom_printf)
	 */
	if (lock_try(&apic_error_lock) == 0)
		return (error ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
	if (error) {
#if	DEBUG
		if (apic_debug)
			debug_enter("pcplusmp: APIC Error interrupt received");
#endif /* DEBUG */
		if (apic_panic_on_apic_error)
			cmn_err(CE_PANIC,
			    "APIC Error interrupt on CPU %d. Status = %x",
			    psm_get_cpu_id(), error);
		else {
			if ((error & ~APIC_CS_ERRORS) == 0) {
				/* cksum error only */
				apic_error |= APIC_ERR_APIC_ERROR;
				apic_apic_error |= error;
				apic_num_apic_errors++;
				apic_num_cksum_errors++;
			} else {
				/*
				 * prom_printf is the best shot we have of
				 * something which is problem free from
				 * high level/NMI type of interrupts
				 */
				prom_printf("APIC Error interrupt on CPU %d. "
				    "Status 0 = %x, Status 1 = %x\n",
				    psm_get_cpu_id(), error0, error1);
				apic_error |= APIC_ERR_APIC_ERROR;
				apic_apic_error |= error;
				apic_num_apic_errors++;
				for (i = 0; i < apic_error_display_delay; i++) {
					tenmicrosec();
				}
				/*
				 * provide more delay next time limited to
				 * roughly 1 clock tick time
				 */
				if (apic_error_display_delay < 500)
					apic_error_display_delay *= 2;
			}
		}
		lock_clear(&apic_error_lock);
		return (DDI_INTR_CLAIMED);
	} else {
		lock_clear(&apic_error_lock);
		return (DDI_INTR_UNCLAIMED);
	}
}
358 356
359 357 /*
360 358 * Turn off the mask bit in the performance counter Local Vector Table entry.
361 359 */
362 360 void
363 361 apic_cpcovf_mask_clear(void)
364 362 {
365 363 apic_reg_ops->apic_write(APIC_PCINT_VECT,
366 364 (apic_reg_ops->apic_read(APIC_PCINT_VECT) & ~APIC_LVT_MASK));
367 365 }
368 366
369 367 /*ARGSUSED*/
370 368 static int
371 369 apic_cmci_enable(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
372 370 {
373 371 apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
374 372 return (0);
375 373 }
376 374
377 375 /*ARGSUSED*/
378 376 static int
379 377 apic_cmci_disable(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
380 378 {
381 379 apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect | AV_MASK);
382 380 return (0);
383 381 }
384 382
385 383 /*ARGSUSED*/
386 384 int
387 385 cmci_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
388 386 {
389 387 cpuset_t cpu_set;
390 388
391 389 CPUSET_ONLY(cpu_set, cpuid);
392 390
393 391 switch (what) {
394 392 case CPU_ON:
395 393 xc_call(NULL, NULL, NULL, CPUSET2BV(cpu_set),
396 394 (xc_func_t)apic_cmci_enable);
397 395 break;
398 396
399 397 case CPU_OFF:
400 398 xc_call(NULL, NULL, NULL, CPUSET2BV(cpu_set),
401 399 (xc_func_t)apic_cmci_disable);
402 400 break;
403 401
404 402 default:
405 403 break;
406 404 }
407 405
408 406 return (0);
409 407 }
410 408
/*
 * Quiesce this CPU's local APIC: raise the task priority to mask all
 * interrupt classes, then mask every local vector table entry (timer,
 * LINT0/LINT1, error, perf counter) and park the spurious vector register.
 * Used on the shutdown/NMI path; the APIC itself stays enabled.
 */
static void
apic_disable_local_apic(void)
{
	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK);

	/* local intr reg 0 */
	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK);

	/* disable NMI */
	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_MASK);

	/* and error interrupt */
	apic_reg_ops->apic_write(APIC_ERR_VECT, AV_MASK);

	/* and perf counter intr */
	apic_reg_ops->apic_write(APIC_PCINT_VECT, AV_MASK);

	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, APIC_SPUR_INTR);
}
431 429
/*
 * Send the INIT/Startup-IPI sequence to CPU 'cpun'. With start == B_TRUE
 * the target is brought out of reset (CMOS shutdown byte set so the BIOS
 * warm-boot path is taken); with B_FALSE only the INIT assert/deassert is
 * sent, leaving the target in the wait-for-SIPI (effectively halted) state.
 * The delays and double-SIPI follow the Intel MP startup protocol.
 */
static void
apic_cpu_send_SIPI(processorid_t cpun, boolean_t start)
{
	int		loop_count;
	uint32_t	vector;
	uint_t		apicid;
	ulong_t		iflag;

	apicid =  apic_cpus[cpun].aci_local_id;

	/*
	 * Interrupts on current CPU will be disabled during the
	 * steps in order to avoid unwanted side effects from
	 * executing interrupt handlers on a problematic BIOS.
	 */
	iflag = intr_clear();

	if (start) {
		outb(CMOS_ADDR, SSB);
		outb(CMOS_DATA, BIOS_SHUTDOWN);
	}

	/*
	 * According to X2APIC specification in section '2.3.5.1' of
	 * Interrupt Command Register Semantics, the semantics of
	 * programming the Interrupt Command Register to dispatch an interrupt
	 * is simplified. A single MSR write to the 64-bit ICR is required
	 * for dispatching an interrupt. Specifically, with the 64-bit MSR
	 * interface to ICR, system software is not required to check the
	 * status of the delivery status bit prior to writing to the ICR
	 * to send an IPI. With the removal of the Delivery Status bit,
	 * system software no longer has a reason to read the ICR. It remains
	 * readable only to aid in debugging.
	 */
#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC) {
		APIC_AV_PENDING_SET();
	}
#endif /* DEBUG */

	/* for integrated - make sure there is one INIT IPI in buffer */
	/* for external - it will wake up the cpu */
	apic_reg_ops->apic_write_int_cmd(apicid, AV_ASSERT | AV_RESET);

	/* If only 1 CPU is installed, PENDING bit will not go low */
	for (loop_count = apic_sipi_max_loop_count; loop_count; loop_count--) {
		if (apic_mode == LOCAL_APIC &&
		    apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
			apic_ret();
		else
			break;
	}

	apic_reg_ops->apic_write_int_cmd(apicid, AV_DEASSERT | AV_RESET);
	drv_usecwait(20000);		/* 20 milli sec */

	if (apic_cpus[cpun].aci_local_ver >= APIC_INTEGRATED_VERS) {
		/* integrated apic */

		/* SIPI vector = page frame number of the real-mode trampoline */
		vector = (rm_platter_pa >> MMU_PAGESHIFT) &
		    (APIC_VECTOR_MASK | APIC_IPL_MASK);

		/* to offset the INIT IPI queue up in the buffer */
		apic_reg_ops->apic_write_int_cmd(apicid, vector | AV_STARTUP);
		drv_usecwait(200);		/* 20 micro sec */

		/*
		 * send the second SIPI (Startup IPI) as recommended by Intel
		 * software development manual.
		 */
		apic_reg_ops->apic_write_int_cmd(apicid, vector | AV_STARTUP);
		drv_usecwait(200);	/* 20 micro sec */
	}

	intr_restore(iflag);
}
510 508
/*
 * PSM entry point: start (boot) CPU 'cpun'. Caller must hold cpu_lock.
 * Returns 0 on success, EINVAL if cpun is not a configured CPU.
 */
/*ARGSUSED1*/
int
apic_cpu_start(processorid_t cpun, caddr_t arg)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!apic_cpu_in_range(cpun)) {
		return (EINVAL);
	}

	/*
	 * Switch to apic_common_send_ipi for safety during starting other CPUs.
	 */
	if (apic_mode == LOCAL_X2APIC) {
		apic_switch_ipi_callback(B_TRUE);
	}

	/* Remember that the CMOS shutdown byte was set (restored at shutdown). */
	apic_cmos_ssb_set = 1;
	apic_cpu_send_SIPI(cpun, B_TRUE);

	return (0);
}
533 531
/*
 * Put CPU into halted state with interrupts disabled.
 *
 * Caller must hold cpu_lock and the target must already be offline,
 * quiesced and disabled. Returns 0 on success, EINVAL for a bad cpuid,
 * ENOTSUP for pre-integrated (external) APICs, or the xc_flush_cpu()
 * error (with CPU_READY restored) if pending cross calls can't be drained.
 */
/*ARGSUSED1*/
int
apic_cpu_stop(processorid_t cpun, caddr_t arg)
{
	int		rc;
	cpu_t		*cp;
	extern cpuset_t cpu_ready_set;
	extern void cpu_idle_intercept_cpu(cpu_t *cp);

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!apic_cpu_in_range(cpun)) {
		return (EINVAL);
	}
	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		return (ENOTSUP);
	}

	cp = cpu_get(cpun);
	ASSERT(cp != NULL);
	ASSERT((cp->cpu_flags & CPU_OFFLINE) != 0);
	ASSERT((cp->cpu_flags & CPU_QUIESCED) != 0);
	ASSERT((cp->cpu_flags & CPU_ENABLE) == 0);

	/* Clear CPU_READY flag to disable cross calls. */
	cp->cpu_flags &= ~CPU_READY;
	CPUSET_ATOMIC_DEL(cpu_ready_set, cpun);
	rc = xc_flush_cpu(cp);
	if (rc != 0) {
		/* Couldn't drain cross calls; undo and report. */
		CPUSET_ATOMIC_ADD(cpu_ready_set, cpun);
		cp->cpu_flags |= CPU_READY;
		return (rc);
	}

	/* Intercept target CPU at a safe point before powering it off. */
	cpu_idle_intercept_cpu(cp);

	/* INIT (no SIPI) leaves the target halted in wait-for-SIPI state. */
	apic_cpu_send_SIPI(cpun, B_FALSE);
	cp->cpu_flags &= ~CPU_RUNNING;

	return (0);
}
579 577
580 578 int
581 579 apic_cpu_ops(psm_cpu_request_t *reqp)
582 580 {
583 581 if (reqp == NULL) {
584 582 return (EINVAL);
585 583 }
586 584
587 585 switch (reqp->pcr_cmd) {
588 586 case PSM_CPU_ADD:
589 587 return (apic_cpu_add(reqp));
590 588
591 589 case PSM_CPU_REMOVE:
592 590 return (apic_cpu_remove(reqp));
593 591
594 592 case PSM_CPU_STOP:
595 593 return (apic_cpu_stop(reqp->req.cpu_stop.cpuid,
596 594 reqp->req.cpu_stop.ctx));
597 595
598 596 default:
599 597 return (ENOTSUP);
600 598 }
601 599 }
602 600
#ifdef	DEBUG
/* Debug-only tunables, patchable from the debugger. */
int	apic_break_on_cpu = 9;
int	apic_stretch_interrupts = 0;
int	apic_stretch_ISR = 1 << 3;	/* IPL of 3 matches nothing now */
#endif /* DEBUG */
608 606
/*
 * generates an interprocessor interrupt to another CPU. Any changes made to
 * this routine must be accompanied by similar changes to
 * apic_common_send_ipi().
 *
 * 'ipl' selects the reserved vector to deliver; 'cpun' is the logical
 * CPU index into apic_cpus[].
 */
void
apic_send_ipi(int cpun, int ipl)
{
	int vector;
	ulong_t flag;

	vector = apic_resv_vector[ipl];

	ASSERT((vector >= APIC_BASE_VECT) && (vector <= APIC_SPUR_INTR));

	flag = intr_clear();

	/* Wait for any previous IPI to finish delivery (local APIC mode). */
	APIC_AV_PENDING_SET();

	apic_reg_ops->apic_write_int_cmd(apic_cpus[cpun].aci_local_id,
	    vector);

	intr_restore(flag);
}
633 631
634 632
/*
 * PSM idle-CPU notification hook; intentionally a no-op for this module.
 */
/*ARGSUSED*/
void
apic_set_idlecpu(processorid_t cpun)
{
}
640 638
/*
 * PSM idle-CPU exit notification hook; intentionally a no-op here.
 */
/*ARGSUSED*/
void
apic_unset_idlecpu(processorid_t cpun)
{
}
646 644
647 645
/*
 * Empty helper called from busy-wait loops; serves as a cheap spin/delay
 * body that the compiler cannot elide across the call.
 */
void
apic_ret()
{
}
652 650
/*
 * If apic_coarse_time == 1, then apic_gettime() is used instead of
 * apic_gethrtime(). This is used for performance instead of accuracy.
 *
 * Returns apic_nsec_since_boot sampled consistently: apic_hrtime_stamp
 * is odd while the clock handler is updating, so spin until it is even
 * and retry if it changed across the read (seqlock-style).
 */

hrtime_t
apic_gettime()
{
	int old_hrtime_stamp;
	hrtime_t temp;

	/*
	 * In one-shot mode, we do not keep time, so if anyone
	 * calls psm_gettime() directly, we vector over to
	 * gethrtime().
	 * one-shot mode MUST NOT be enabled if this psm is the source of
	 * hrtime.
	 */

	if (apic_oneshot)
		return (gethrtime());


gettime_again:
	while ((old_hrtime_stamp = apic_hrtime_stamp) & 1)
		apic_ret();

	temp = apic_nsec_since_boot;

	if (apic_hrtime_stamp != old_hrtime_stamp) {	/* got an interrupt */
		goto gettime_again;
	}
	return (temp);
}
687 685
/*
 * Here we return the number of nanoseconds since booting. Note every
 * clock interrupt increments apic_nsec_since_boot by the appropriate
 * amount.
 *
 * Time is kept on CPU 0's local APIC timer; other CPUs fetch its current
 * count with an APIC remote read. On a failed remote read, or if the
 * computed time ever runs backwards, the last returned value is reused
 * (and an error counter bumped) to keep hrtime monotonic.
 */
hrtime_t
apic_gethrtime(void)
{
	int curr_timeval, countval, elapsed_ticks;
	int old_hrtime_stamp, status;
	hrtime_t temp;
	uint32_t cpun;
	ulong_t oflags;

	/*
	 * In one-shot mode, we do not keep time, so if anyone
	 * calls psm_gethrtime() directly, we vector over to
	 * gethrtime().
	 * one-shot mode MUST NOT be enabled if this psm is the source of
	 * hrtime.
	 */

	if (apic_oneshot)
		return (gethrtime());

	oflags = intr_clear();	/* prevent migration */

	cpun = apic_reg_ops->apic_read(APIC_LID_REG);
	if (apic_mode == LOCAL_APIC)
		cpun >>= APIC_ID_BIT_OFFSET;

	lock_set(&apic_gethrtime_lock);

gethrtime_again:
	/* Wait out an in-progress clock-handler update (odd stamp). */
	while ((old_hrtime_stamp = apic_hrtime_stamp) & 1)
		apic_ret();

	/*
	 * Check to see which CPU we are on.  Note the time is kept on
	 * the local APIC of CPU 0.  If on CPU 0, simply read the current
	 * counter.  If on another CPU, issue a remote read command to CPU 0.
	 */
	if (cpun == apic_cpus[0].aci_local_id) {
		countval = apic_reg_ops->apic_read(APIC_CURR_COUNT);
	} else {
#ifdef	DEBUG
		APIC_AV_PENDING_SET();
#else
		if (apic_mode == LOCAL_APIC)
			APIC_AV_PENDING_SET();
#endif /* DEBUG */

		apic_reg_ops->apic_write_int_cmd(
		    apic_cpus[0].aci_local_id, APIC_CURR_ADD | AV_REMOTE);

		while ((status = apic_reg_ops->apic_read(APIC_INT_CMD1))
		    & AV_READ_PENDING) {
			apic_ret();
		}

		if (status & AV_REMOTE_STATUS)	/* 1 = valid */
			countval = apic_reg_ops->apic_read(APIC_REMOTE_READ);
		else {	/* 0 = invalid */
			apic_remote_hrterr++;
			/*
			 * return last hrtime right now, will need more
			 * testing if change to retry
			 */
			temp = apic_last_hrtime;

			lock_clear(&apic_gethrtime_lock);

			intr_restore(oflags);

			return (temp);
		}
	}
	/* Count-up beyond the last read means the timer wrapped; clamp. */
	if (countval > last_count_read)
		countval = 0;
	else
		last_count_read = countval;

	elapsed_ticks = apic_hertz_count - countval;

	curr_timeval = APIC_TICKS_TO_NSECS(elapsed_ticks);
	temp = apic_nsec_since_boot + curr_timeval;

	if (apic_hrtime_stamp != old_hrtime_stamp) {	/* got an interrupt */
		/* we might have clobbered last_count_read. Restore it */
		last_count_read = apic_hertz_count;
		goto gethrtime_again;
	}

	if (temp < apic_last_hrtime) {
		/* return last hrtime if error occurs */
		apic_hrtime_error++;
		temp = apic_last_hrtime;
	}
	else
		apic_last_hrtime = temp;

	lock_clear(&apic_gethrtime_lock);
	intr_restore(oflags);

	return (temp);
}
794 792
/*
 * apic NMI handler. During shutdown the NMI is used to park CPUs (disable
 * their local APICs); otherwise one CPU at a time reports the NMI, either
 * entering kmdb, panicking, or just logging, per the patchable policy
 * variables apic_kmdb_on_nmi / apic_panic_on_nmi.
 */
/*ARGSUSED*/
void
apic_nmi_intr(caddr_t arg, struct regs *rp)
{
	if (apic_shutdown_processors) {
		apic_disable_local_apic();
		return;
	}

	apic_error |= APIC_ERR_NMI;

	/* Only one CPU reports; others drop the NMI silently. */
	if (!lock_try(&apic_nmi_lock))
		return;
	apic_num_nmis++;

	if (apic_kmdb_on_nmi && psm_debugger()) {
		debug_enter("NMI received: entering kmdb\n");
	} else if (apic_panic_on_nmi) {
		/* Keep panic from entering kmdb. */
		nopanicdebug = 1;
		panic("NMI received\n");
	} else {
		/*
		 * prom_printf is the best shot we have of something which is
		 * problem free from high level/NMI type of interrupts
		 */
		prom_printf("NMI received\n");
	}

	lock_clear(&apic_nmi_lock);
}
827 825
828 826 processorid_t
829 827 apic_get_next_processorid(processorid_t cpu_id)
830 828 {
831 829
832 830 int i;
833 831
834 832 if (cpu_id == -1)
835 833 return ((processorid_t)0);
836 834
837 835 for (i = cpu_id + 1; i < NCPU; i++) {
838 836 if (apic_cpu_in_range(i))
839 837 return (i);
840 838 }
841 839
842 840 return ((processorid_t)-1);
843 841 }
844 842
/*
 * PSM CPU hotplug: add a new processor described by reqp->req.cpu_add.
 *
 * Validates the MADT-style argument (local APIC or x2APIC id + proc id),
 * then allocates a cpuid slot under apic_ioapic_lock, preferring in order:
 * a "dirty" slot previously used by this same APIC/proc id pair, a fresh
 * never-used slot, then any free slot. On success the apic_cpus[] entry is
 * initialized, the CPU joins apic_cpumask, and the chosen cpuid is returned
 * through reqp. Returns 0, or ENOTSUP/EINVAL/EEXIST/EAGAIN/EBUSY on error.
 */
int
apic_cpu_add(psm_cpu_request_t *reqp)
{
	int i, rv = 0;
	ulong_t iflag;
	boolean_t first = B_TRUE;
	uchar_t localver;
	uint32_t localid, procid;
	processorid_t cpuid = (processorid_t)-1;
	mach_cpu_add_arg_t *ap;

	ASSERT(reqp != NULL);
	reqp->req.cpu_add.cpuid = (processorid_t)-1;

	/* Check whether CPU hotplug is supported. */
	if (!plat_dr_support_cpu() || apic_max_nproc == -1) {
		return (ENOTSUP);
	}

	ap = (mach_cpu_add_arg_t *)reqp->req.cpu_add.argp;
	switch (ap->type) {
	case MACH_CPU_ARG_LOCAL_APIC:
		localid = ap->arg.apic.apic_id;
		procid = ap->arg.apic.proc_id;
		/* xAPIC ids are 8-bit; 255 is the broadcast id. */
		if (localid >= 255 || procid > 255) {
			cmn_err(CE_WARN,
			    "!apic: apicid(%u) or procid(%u) is invalid.",
			    localid, procid);
			return (EINVAL);
		}
		break;

	case MACH_CPU_ARG_LOCAL_X2APIC:
		localid = ap->arg.apic.apic_id;
		procid = ap->arg.apic.proc_id;
		if (localid >= UINT32_MAX) {
			cmn_err(CE_WARN,
			    "!apic: x2apicid(%u) is invalid.", localid);
			return (EINVAL);
		} else if (localid >= 255 && apic_mode == LOCAL_APIC) {
			/* Ids >= 255 are unaddressable in xAPIC mode. */
			cmn_err(CE_WARN, "!apic: system is in APIC mode, "
			    "can't support x2APIC processor.");
			return (ENOTSUP);
		}
		break;

	default:
		cmn_err(CE_WARN,
		    "!apic: unknown argument type %d to apic_cpu_add().",
		    ap->type);
		return (EINVAL);
	}

	/* Use apic_ioapic_lock to sync with apic_get_next_bind_cpu. */
	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	/* Check whether local APIC id already exists. */
	for (i = 0; i < apic_nproc; i++) {
		if (!CPU_IN_SET(apic_cpumask, i))
			continue;
		if (apic_cpus[i].aci_local_id == localid) {
			lock_clear(&apic_ioapic_lock);
			intr_restore(iflag);
			cmn_err(CE_WARN,
			    "!apic: local apic id %u already exists.",
			    localid);
			return (EEXIST);
		} else if (apic_cpus[i].aci_processor_id == procid) {
			lock_clear(&apic_ioapic_lock);
			intr_restore(iflag);
			cmn_err(CE_WARN,
			    "!apic: processor id %u already exists.",
			    (int)procid);
			return (EEXIST);
		}

		/*
		 * There's no local APIC version number available in MADT table,
		 * so assume that all CPUs are homogeneous and use local APIC
		 * version number of the first existing CPU.
		 */
		if (first) {
			first = B_FALSE;
			localver = apic_cpus[i].aci_local_ver;
		}
	}
	ASSERT(first == B_FALSE);

	/*
	 * Try to assign the same cpuid if APIC id exists in the dirty cache.
	 */
	for (i = 0; i < apic_max_nproc; i++) {
		if (CPU_IN_SET(apic_cpumask, i)) {
			ASSERT((apic_cpus[i].aci_status & APIC_CPU_FREE) == 0);
			continue;
		}
		ASSERT(apic_cpus[i].aci_status & APIC_CPU_FREE);
		if ((apic_cpus[i].aci_status & APIC_CPU_DIRTY) &&
		    apic_cpus[i].aci_local_id == localid &&
		    apic_cpus[i].aci_processor_id == procid) {
			cpuid = i;
			break;
		}
	}

	/* Avoid the dirty cache and allocate fresh slot if possible. */
	if (cpuid == (processorid_t)-1) {
		for (i = 0; i < apic_max_nproc; i++) {
			if ((apic_cpus[i].aci_status & APIC_CPU_FREE) &&
			    (apic_cpus[i].aci_status & APIC_CPU_DIRTY) == 0) {
				cpuid = i;
				break;
			}
		}
	}

	/* Try to find any free slot as last resort. */
	if (cpuid == (processorid_t)-1) {
		for (i = 0; i < apic_max_nproc; i++) {
			if (apic_cpus[i].aci_status & APIC_CPU_FREE) {
				cpuid = i;
				break;
			}
		}
	}

	if (cpuid == (processorid_t)-1) {
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		cmn_err(CE_NOTE,
		    "!apic: failed to allocate cpu id for processor %u.",
		    procid);
		rv = EAGAIN;
	} else if (ACPI_FAILURE(acpica_map_cpu(cpuid, procid))) {
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		cmn_err(CE_NOTE,
		    "!apic: failed to build mapping for processor %u.",
		    procid);
		rv = EBUSY;
	} else {
		ASSERT(cpuid >= 0 && cpuid < NCPU);
		ASSERT(cpuid < apic_max_nproc && cpuid < max_ncpus);
		bzero(&apic_cpus[cpuid], sizeof (apic_cpus[0]));
		apic_cpus[cpuid].aci_processor_id = procid;
		apic_cpus[cpuid].aci_local_id = localid;
		apic_cpus[cpuid].aci_local_ver = localver;
		CPUSET_ATOMIC_ADD(apic_cpumask, cpuid);
		if (cpuid >= apic_nproc) {
			apic_nproc = cpuid + 1;
		}
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		reqp->req.cpu_add.cpuid = cpuid;
	}

	return (rv);
}
1004 1002
/*
 * Tear down APIC bookkeeping for a hot-removed CPU.
 *
 * Returns 0 on success, ENOTSUP when CPU hotplug is not supported,
 * ENODEV when the cpuid is not present in apic_cpus, or ENOENT when
 * the ACPI cpuid mapping cannot be removed.  On success the slot is
 * marked FREE|DIRTY so a later re-add of the same APIC id can get the
 * same cpuid back (see the dirty-cache scan in apic_cpu_add()).
 */
int
apic_cpu_remove(psm_cpu_request_t *reqp)
{
	int i;
	ulong_t iflag;
	processorid_t cpuid;

	/* Check whether CPU hotplug is supported. */
	if (!plat_dr_support_cpu() || apic_max_nproc == -1) {
		return (ENOTSUP);
	}

	cpuid = reqp->req.cpu_remove.cpuid;

	/* Use apic_ioapic_lock to sync with apic_get_next_bind_cpu. */
	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	if (!apic_cpu_in_range(cpuid)) {
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		cmn_err(CE_WARN,
		    "!apic: cpuid %d doesn't exist in apic_cpus array.",
		    cpuid);
		return (ENODEV);
	}
	ASSERT((apic_cpus[cpuid].aci_status & APIC_CPU_FREE) == 0);

	/* Drop the ACPI processor-to-cpuid mapping first; bail if we can't. */
	if (ACPI_FAILURE(acpica_unmap_cpu(cpuid))) {
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		return (ENOENT);
	}

	if (cpuid == apic_nproc - 1) {
		/*
		 * We are removing the highest numbered cpuid so we need to
		 * find the next highest cpuid as the new value for apic_nproc.
		 */
		for (i = apic_nproc; i > 0; i--) {
			if (CPU_IN_SET(apic_cpumask, i - 1)) {
				apic_nproc = i;
				break;
			}
		}
		/* at least one CPU left */
		ASSERT(i > 0);
	}
	CPUSET_ATOMIC_DEL(apic_cpumask, cpuid);
	/* mark slot as free and keep it in the dirty cache */
	apic_cpus[cpuid].aci_status = APIC_CPU_FREE | APIC_CPU_DIRTY;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	return (0);
}
1062 1060
1063 1061 /*
1064 1062 * Return the number of APIC clock ticks elapsed for 8245 to decrement
1065 1063 * (APIC_TIME_COUNT + pit_ticks_adj) ticks.
1066 1064 */
uint_t
apic_calibrate(volatile uint32_t *addr, uint16_t *pit_ticks_adj)
{
	uint8_t pit_tick_lo;
	uint16_t pit_tick, target_pit_tick;
	uint32_t start_apic_tick, end_apic_tick;
	ulong_t iflag;
	uint32_t reg;

	/*
	 * Convert the caller's pointer into a byte offset from the local
	 * APIC base (apicadr); apic_reg_ops->apic_read() takes an offset,
	 * not a pointer.
	 */
	reg = addr + APIC_CURR_COUNT - apicadr;

	/* Calibration must not be perturbed by interrupts. */
	iflag = intr_clear();

	/*
	 * Read the 16-bit PIT counter 0 as two one-byte latches (low then
	 * high).  Re-read until the value is comfortably away from the
	 * wrap boundaries so the two byte reads are mutually consistent.
	 */
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick < APIC_TIME_MIN ||
	    pit_tick_lo <= APIC_LB_MIN || pit_tick_lo >= APIC_LB_MAX);

	/*
	 * Wait for the 8254 to decrement by 5 ticks to ensure
	 * we didn't start in the middle of a tick.
	 * Compare with 0x10 for the wrap around case.
	 */
	target_pit_tick = pit_tick - 5;
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick > target_pit_tick || pit_tick_lo < 0x10);

	/* Snapshot the APIC current-count at the start of the window. */
	start_apic_tick = apic_reg_ops->apic_read(reg);

	/*
	 * Wait for the 8254 to decrement by
	 * (APIC_TIME_COUNT + pit_ticks_adj) ticks
	 */
	target_pit_tick = pit_tick - APIC_TIME_COUNT;
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick > target_pit_tick || pit_tick_lo < 0x10);

	end_apic_tick = apic_reg_ops->apic_read(reg);

	/* Report how far we overshot the target so callers can compensate. */
	*pit_ticks_adj = target_pit_tick - pit_tick;

	intr_restore(iflag);

	/* APIC timer counts down, so start - end is the elapsed tick count. */
	return (start_apic_tick - end_apic_tick);
}
1117 1115
1118 1116 /*
1119 1117 * Initialise the APIC timer on the local APIC of CPU 0 to the desired
1120 1118 * frequency. Note at this stage in the boot sequence, the boot processor
1121 1119 * is the only active processor.
1122 1120 * hertz value of 0 indicates a one-shot mode request. In this case
1123 1121 * the function returns the resolution (in nanoseconds) for the hardware
1124 1122 * timer interrupt. If one-shot mode capability is not available,
1125 1123 * the return value will be 0. apic_enable_oneshot is a global switch
1126 1124 * for disabling the functionality.
1127 1125 * A non-zero positive value for hertz indicates a periodic mode request.
1128 1126 * In this case the hardware will be programmed to generate clock interrupts
1129 1127 * at hertz frequency and returns the resolution of interrupts in
1130 1128 * nanosecond.
1131 1129 */
1132 1130
1133 1131 int
1134 1132 apic_clkinit(int hertz)
1135 1133 {
1136 1134 int ret;
1137 1135
1138 1136 apic_int_busy_mark = (apic_int_busy_mark *
1139 1137 apic_sample_factor_redistribution) / 100;
1140 1138 apic_int_free_mark = (apic_int_free_mark *
1141 1139 apic_sample_factor_redistribution) / 100;
1142 1140 apic_diff_for_redistribution = (apic_diff_for_redistribution *
1143 1141 apic_sample_factor_redistribution) / 100;
1144 1142
1145 1143 ret = apic_timer_init(hertz);
1146 1144 return (ret);
1147 1145
1148 1146 }
1149 1147
/*
 * apic_preshutdown:
 * Called early in shutdown whilst we can still access filesystems to do
 * things like loading modules which will be required to complete shutdown
 * after filesystems are all unmounted.
 *
 * This PSM implementation has nothing to do here beyond emitting a
 * verbose-poweroff trace of the shutdown parameters.
 */
void
apic_preshutdown(int cmd, int fcn)
{
	APIC_VERBOSE_POWEROFF(("apic_preshutdown(%d,%d); m=%d a=%d\n",
	    cmd, fcn, apic_poweroff_method, apic_enable_acpi));
}
1162 1160
/*
 * Quiesce the interrupt hardware for system shutdown/reboot and, for the
 * shutdown+poweroff case, attempt to turn the power off.
 *
 * Sequence: stop the HPET, NMI all other CPUs so they perform their
 * per-processor shutdown, disable all I/O APIC redirection entries,
 * optionally switch the IMCR back to PIC mode, and disable the local
 * APIC.  For A_SHUTDOWN with AD_POWEROFF, one of several board-specific
 * poweroff methods (RTC NVRAM, Aspen BMC, Sitka BMC, or ACPI) is used.
 * If power does not go off within seven seconds, we return and let the
 * caller fall through to the halt path.
 */
void
apic_shutdown(int cmd, int fcn)
{
	int restarts, attempts;
	int i;
	uchar_t byte;
	ulong_t iflag;

	hpet_acpi_fini();

	/* Send NMI to all CPUs except self to do per processor shutdown */
	iflag = intr_clear();
#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif /* DEBUG */
	apic_shutdown_processors = 1;
	apic_reg_ops->apic_write(APIC_INT_CMD1,
	    AV_NMI | AV_LEVEL | AV_SH_ALL_EXCSELF);

	/* restore cmos shutdown byte before reboot */
	if (apic_cmos_ssb_set) {
		outb(CMOS_ADDR, SSB);
		outb(CMOS_DATA, 0);
	}

	ioapic_disable_redirection();

	/* disable apic mode if imcr present */
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_PIC);
	}

	apic_disable_local_apic();

	intr_restore(iflag);

	/* remainder of function is for shutdown cases only */
	if (cmd != A_SHUTDOWN)
		return;

	/*
	 * Switch system back into Legacy-Mode if using ACPI and
	 * not powering-off.  Some BIOSes need to remain in ACPI-mode
	 * for power-off to succeed (Dell Dimension 4600)
	 * Do not disable ACPI while doing fastreboot
	 */
	if (apic_enable_acpi && fcn != AD_POWEROFF && fcn != AD_FASTREBOOT)
		(void) AcpiDisable();

	if (fcn == AD_FASTREBOOT) {
		/* INIT IPI to all other CPUs so they reset immediately. */
		apic_reg_ops->apic_write(APIC_INT_CMD1,
		    AV_ASSERT | AV_RESET | AV_SH_ALL_EXCSELF);
	}

	/* remainder of function is for shutdown+poweroff case only */
	if (fcn != AD_POWEROFF)
		return;

	switch (apic_poweroff_method) {
	case APIC_POWEROFF_VIA_RTC:

		/* select the extended NVRAM bank in the RTC */
		outb(CMOS_ADDR, RTC_REGA);
		byte = inb(CMOS_DATA);
		outb(CMOS_DATA, (byte | EXT_BANK));

		outb(CMOS_ADDR, PFR_REG);

		/* for Predator must toggle the PAB bit */
		byte = inb(CMOS_DATA);

		/*
		 * clear power active bar, wakeup alarm and
		 * kickstart
		 */
		byte &= ~(PAB_CBIT | WF_FLAG | KS_FLAG);
		outb(CMOS_DATA, byte);

		/* delay before next write */
		drv_usecwait(1000);

		/* for S40 the following would suffice */
		byte = inb(CMOS_DATA);

		/* power active bar control bit */
		byte |= PAB_CBIT;
		outb(CMOS_DATA, byte);

		break;

	case APIC_POWEROFF_VIA_ASPEN_BMC:
		/*
		 * Poll for the BMC interface to go non-busy before each
		 * command; after 3 busy polls restart the whole exchange,
		 * and give up entirely after 3 restarts.
		 */
		restarts = 0;
restart_aspen_bmc:
		if (++restarts == 3)
			break;
		attempts = 0;
		do {
			byte = inb(MISMIC_FLAG_REGISTER);
			byte &= MISMIC_BUSY_MASK;
			if (byte != 0) {
				drv_usecwait(1000);
				if (attempts >= 3)
					goto restart_aspen_bmc;
				++attempts;
			}
		} while (byte != 0);
		outb(MISMIC_CNTL_REGISTER, CC_SMS_GET_STATUS);
		byte = inb(MISMIC_FLAG_REGISTER);
		byte |= 0x1;
		outb(MISMIC_FLAG_REGISTER, byte);
		i = 0;
		/* Replay the canned poweroff command sequence to the BMC. */
		for (; i < (sizeof (aspen_bmc)/sizeof (aspen_bmc[0]));
		    i++) {
			attempts = 0;
			do {
				byte = inb(MISMIC_FLAG_REGISTER);
				byte &= MISMIC_BUSY_MASK;
				if (byte != 0) {
					drv_usecwait(1000);
					if (attempts >= 3)
						goto restart_aspen_bmc;
					++attempts;
				}
			} while (byte != 0);
			outb(MISMIC_CNTL_REGISTER, aspen_bmc[i].cntl);
			outb(MISMIC_DATA_REGISTER, aspen_bmc[i].data);
			byte = inb(MISMIC_FLAG_REGISTER);
			byte |= 0x1;
			outb(MISMIC_FLAG_REGISTER, byte);
		}
		break;

	case APIC_POWEROFF_VIA_SITKA_BMC:
		/* Same retry/restart structure as the Aspen BMC above. */
		restarts = 0;
restart_sitka_bmc:
		if (++restarts == 3)
			break;
		attempts = 0;
		do {
			byte = inb(SMS_STATUS_REGISTER);
			byte &= SMS_STATE_MASK;
			if ((byte == SMS_READ_STATE) ||
			    (byte == SMS_WRITE_STATE)) {
				drv_usecwait(1000);
				if (attempts >= 3)
					goto restart_sitka_bmc;
				++attempts;
			}
		} while ((byte == SMS_READ_STATE) ||
		    (byte == SMS_WRITE_STATE));
		outb(SMS_COMMAND_REGISTER, SMS_GET_STATUS);
		i = 0;
		for (; i < (sizeof (sitka_bmc)/sizeof (sitka_bmc[0]));
		    i++) {
			attempts = 0;
			do {
				byte = inb(SMS_STATUS_REGISTER);
				byte &= SMS_IBF_MASK;
				if (byte != 0) {
					drv_usecwait(1000);
					if (attempts >= 3)
						goto restart_sitka_bmc;
					++attempts;
				}
			} while (byte != 0);
			outb(sitka_bmc[i].port, sitka_bmc[i].data);
		}
		break;

	case APIC_POWEROFF_NONE:

		/* If no APIC direct method, we will try using ACPI */
		if (apic_enable_acpi) {
			if (acpi_poweroff() == 1)
				return;
		} else
			return;

		break;
	}
	/*
	 * Wait a limited time here for power to go off.
	 * If the power does not go off, then there was a
	 * problem and we should continue to the halt which
	 * prints a message for the user to press a key to
	 * reboot.
	 */
	drv_usecwait(7000000);	/* wait seven seconds */

}
1357 1355
/* Cyclic handle for APIC timer support; registered outside this file. */
cyclic_id_t apic_cyclic_id;
1359 1357
1360 1358 /*
1361 1359 * The following functions are in the platform specific file so that they
1362 1360 * can be different functions depending on whether we are running on
1363 1361 * bare metal or a hypervisor.
1364 1362 */
1365 1363
/*
 * map an apic for memory-mapped access
 */
uint32_t *
mapin_apic(uint32_t addr, size_t len, int flags)
{
	/* psm_map_phys() establishes a kernel VA mapping over the MMIO range. */
	return ((void *)psm_map_phys(addr, len, flags));
}
1374 1372
/* I/O APICs are mapped exactly like local APICs. */
uint32_t *
mapin_ioapic(uint32_t addr, size_t len, int flags)
{
	return (mapin_apic(addr, len, flags));
}
1380 1378
/*
 * unmap an apic
 */
void
mapout_apic(caddr_t addr, size_t len)
{
	/* Release the kernel VA mapping created by mapin_apic(). */
	psm_unmap_phys(addr, len);
}
1389 1387
/* Companion to mapin_ioapic(); shares the local APIC unmap path. */
void
mapout_ioapic(caddr_t addr, size_t len)
{
	mapout_apic(addr, len);
}
1395 1393
1396 1394 uint32_t
1397 1395 ioapic_read(int ioapic_ix, uint32_t reg)
1398 1396 {
1399 1397 volatile uint32_t *ioapic;
1400 1398
1401 1399 ioapic = apicioadr[ioapic_ix];
1402 1400 ioapic[APIC_IO_REG] = reg;
1403 1401 return (ioapic[APIC_IO_DATA]);
1404 1402 }
1405 1403
1406 1404 void
1407 1405 ioapic_write(int ioapic_ix, uint32_t reg, uint32_t value)
1408 1406 {
1409 1407 volatile uint32_t *ioapic;
1410 1408
1411 1409 ioapic = apicioadr[ioapic_ix];
1412 1410 ioapic[APIC_IO_REG] = reg;
1413 1411 ioapic[APIC_IO_DATA] = value;
1414 1412 }
1415 1413
1416 1414 void
1417 1415 ioapic_write_eoi(int ioapic_ix, uint32_t value)
1418 1416 {
1419 1417 volatile uint32_t *ioapic;
1420 1418
1421 1419 ioapic = apicioadr[ioapic_ix];
1422 1420 ioapic[APIC_IO_EOI] = value;
1423 1421 }
1424 1422
1425 1423 /*
1426 1424 * Round-robin algorithm to find the next CPU with interrupts enabled.
1427 1425 * It can't share the same static variable apic_next_bind_cpu with
1428 1426 * apic_get_next_bind_cpu(), since that will cause all interrupts to be
1429 1427 * bound to CPU1 at boot time. During boot, only CPU0 is online with
1430 1428 * interrupts enabled when apic_get_next_bind_cpu() and apic_find_cpu()
1431 1429 * are called. However, the pcplusmp driver assumes that there will be
1432 1430 * boot_ncpus CPUs configured eventually so it tries to distribute all
1433 1431 * interrupts among CPU0 - CPU[boot_ncpus - 1]. Thus to prevent all
1434 1432 * interrupts being targetted at CPU1, we need to use a dedicated static
1435 1433 * variable for find_next_cpu() instead of sharing apic_next_bind_cpu.
1436 1434 */
1437 1435
1438 1436 processorid_t
1439 1437 apic_find_cpu(int flag)
1440 1438 {
1441 1439 int i;
1442 1440 static processorid_t acid = 0;
1443 1441
1444 1442 /* Find the first CPU with the passed-in flag set */
1445 1443 for (i = 0; i < apic_nproc; i++) {
1446 1444 if (++acid >= apic_nproc) {
1447 1445 acid = 0;
1448 1446 }
↓ open down ↓ |
1306 lines elided |
↑ open up ↑ |
1449 1447 if (apic_cpu_in_range(acid) &&
1450 1448 (apic_cpus[acid].aci_status & flag)) {
1451 1449 break;
1452 1450 }
1453 1451 }
1454 1452
1455 1453 ASSERT((apic_cpus[acid].aci_status & flag) != 0);
1456 1454 return (acid);
1457 1455 }
1458 1456
1459 -/*
1460 - * Switch between safe and x2APIC IPI sending method.
1461 - * CPU may power on in xapic mode or x2apic mode. If CPU needs to send IPI to
1462 - * other CPUs before entering x2APIC mode, it still needs to xAPIC method.
1463 - * Before sending StartIPI to target CPU, psm_send_ipi will be changed to
1464 - * apic_common_send_ipi, which detects current local APIC mode and use right
1465 - * method to send IPI. If some CPUs fail to start up, apic_poweron_cnt
1466 - * won't return to zero, so apic_common_send_ipi will always be used.
1467 - * psm_send_ipi can't be simply changed back to x2apic_send_ipi if some CPUs
1468 - * failed to start up because those failed CPUs may recover itself later at
1469 - * unpredictable time.
1470 - */
1471 -void
1472 -apic_switch_ipi_callback(boolean_t enter)
1473 -{
1474 - ulong_t iflag;
1475 - struct psm_ops *pops = psmops;
1476 -
1477 - iflag = intr_clear();
1478 - lock_set(&apic_mode_switch_lock);
1479 - if (enter) {
1480 - ASSERT(apic_poweron_cnt >= 0);
1481 - if (apic_poweron_cnt == 0) {
1482 - pops->psm_send_ipi = apic_common_send_ipi;
1483 - send_dirintf = pops->psm_send_ipi;
1484 - }
1485 - apic_poweron_cnt++;
1486 - } else {
1487 - ASSERT(apic_poweron_cnt > 0);
1488 - apic_poweron_cnt--;
1489 - if (apic_poweron_cnt == 0) {
1490 - pops->psm_send_ipi = x2apic_send_ipi;
1491 - send_dirintf = pops->psm_send_ipi;
1492 - }
1493 - }
1494 - lock_clear(&apic_mode_switch_lock);
1495 - intr_restore(iflag);
1496 -}
1497 -
/*
 * Initialize interrupt-remapping support when a VT backend has been
 * registered via psm_vt_ops, and switch the local APICs into x2APIC
 * mode if the hardware supports it.  Without a remapping engine this
 * is a no-op and the system stays in xAPIC (MMIO) mode.
 */
void
apic_intrmap_init(int apic_mode)
{
	int suppress_brdcst_eoi = 0;

	/*
	 * Intel Software Developer's Manual 3A, 10.12.7:
	 *
	 * Routing of device interrupts to local APIC units operating in
	 * x2APIC mode requires use of the interrupt-remapping architecture
	 * specified in the Intel Virtualization Technology for Directed
	 * I/O, Revision 1.3.  Because of this, BIOS must enumerate support
	 * for and software must enable this interrupt remapping with
	 * Extended Interrupt Mode Enabled before it enabling x2APIC mode in
	 * the local APIC units.
	 *
	 *
	 * In other words, to use the APIC in x2APIC mode, we need interrupt
	 * remapping.  Since we don't start up the IOMMU by default, we
	 * won't be able to do any interrupt remapping and therefore have to
	 * use the APIC in traditional 'local APIC' mode with memory mapped
	 * I/O.
	 */

	if (psm_vt_ops != NULL) {
		if (((apic_intrmap_ops_t *)psm_vt_ops)->
		    apic_intrmap_init(apic_mode) == DDI_SUCCESS) {

			apic_vt_ops = psm_vt_ops;

			/*
			 * We leverage the interrupt remapping engine to
			 * suppress broadcast EOI; thus we must send the
			 * directed EOI with the directed-EOI handler.
			 */
			if (apic_directed_EOI_supported() == 0) {
				suppress_brdcst_eoi = 1;
			}

			apic_vt_ops->apic_intrmap_enable(suppress_brdcst_eoi);

			if (apic_detect_x2apic()) {
				apic_enable_x2apic();
			}

			/*
			 * Re-checked (rather than cached) deliberately;
			 * install the directed-EOI handler only when the
			 * APIC cannot do directed EOI natively.
			 */
			if (apic_directed_EOI_supported() == 0) {
				apic_set_directed_EOI_handler();
			}
		}
	}
}
1549 1508
1550 1509 /*ARGSUSED*/
1551 1510 static void
1552 1511 apic_record_ioapic_rdt(void *intrmap_private, ioapic_rdt_t *irdt)
1553 1512 {
1554 1513 irdt->ir_hi <<= APIC_ID_BIT_OFFSET;
1555 1514 }
1556 1515
1557 1516 /*ARGSUSED*/
1558 1517 static void
1559 1518 apic_record_msi(void *intrmap_private, msi_regs_t *mregs)
1560 1519 {
1561 1520 mregs->mr_addr = MSI_ADDR_HDR |
1562 1521 (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
1563 1522 (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT) |
1564 1523 (mregs->mr_addr << MSI_ADDR_DEST_SHIFT);
1565 1524 mregs->mr_data = (MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) |
1566 1525 mregs->mr_data;
1567 1526 }
1568 1527
1569 1528 /*
1570 1529 * Functions from apic_introp.c
1571 1530 *
1572 1531 * Those functions are used by apic_intr_ops().
1573 1532 */
1574 1533
1575 1534 /*
1576 1535 * MSI support flag:
1577 1536 * reflects whether MSI is supported at APIC level
1578 1537 * it can also be patched through /etc/system
1579 1538 *
1580 1539 * 0 = default value - don't know and need to call apic_check_msi_support()
1581 1540 * to find out then set it accordingly
1582 1541 * 1 = supported
1583 1542 * -1 = not supported
1584 1543 */
/* See the block comment above for the meaning of 0 / 1 / -1. */
int apic_support_msi = 0;

/* Multiple vector support for MSI-X */
int apic_msix_enable = 1;

/* Multiple vector support for MSI */
int apic_multi_msi_enable = 1;
1592 1551
1593 1552 /*
1594 1553 * Check whether the system supports MSI.
1595 1554 *
1596 1555 * MSI is required for PCI-E and for PCI versions later than 2.2, so if we find
1597 1556 * a PCI-E bus or we find a PCI bus whose version we know is >= 2.2, then we
1598 1557 * return PSM_SUCCESS to indicate this system supports MSI.
1599 1558 *
1600 1559 * (Currently the only way we check whether a given PCI bus supports >= 2.2 is
1601 1560 * by detecting if we are running inside the KVM hypervisor, which guarantees
1602 1561 * this version number.)
1603 1562 */
1604 1563 int
1605 1564 apic_check_msi_support()
1606 1565 {
1607 1566 dev_info_t *cdip;
1608 1567 char dev_type[16];
1609 1568 int dev_len;
1610 1569
1611 1570 DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support:\n"));
1612 1571
1613 1572 /*
1614 1573 * check whether the first level children of root_node have
1615 1574 * PCI-E or PCI capability.
1616 1575 */
1617 1576 for (cdip = ddi_get_child(ddi_root_node()); cdip != NULL;
1618 1577 cdip = ddi_get_next_sibling(cdip)) {
1619 1578
1620 1579 DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: cdip: 0x%p,"
1621 1580 " driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
1622 1581 ddi_driver_name(cdip), ddi_binding_name(cdip),
1623 1582 ddi_node_name(cdip)));
1624 1583 dev_len = sizeof (dev_type);
1625 1584 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
1626 1585 "device_type", (caddr_t)dev_type, &dev_len)
1627 1586 != DDI_PROP_SUCCESS)
1628 1587 continue;
1629 1588 if (strcmp(dev_type, "pciex") == 0)
1630 1589 return (PSM_SUCCESS);
1631 1590 if (strcmp(dev_type, "pci") == 0 && get_hwenv() == HW_KVM)
1632 1591 return (PSM_SUCCESS);
1633 1592 }
1634 1593
1635 1594 /* MSI is not supported on this system */
1636 1595 DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
1637 1596 "device_type found\n"));
1638 1597 return (PSM_FAILURE);
1639 1598 }
1640 1599
1641 1600 /*
1642 1601 * apic_pci_msi_unconfigure:
1643 1602 *
1644 1603 * This and next two interfaces are copied from pci_intr_lib.c
1645 1604 * Do ensure that these two files stay in sync.
1646 1605 * These needed to be copied over here to avoid a deadlock situation on
1647 1606 * certain mp systems that use MSI interrupts.
1648 1607 *
1649 1608 * IMPORTANT regards next three interfaces:
1650 1609 * i) are called only for MSI/X interrupts.
1651 1610 * ii) called with interrupts disabled, and must not block
1652 1611 */
/*
 * Clear the MSI or MSI-X programming for vector `inum' of `rdip'.
 * For MSI: clear the multiple-message-enable field and zero the
 * address/data registers (both halves for 64-bit capable devices).
 * For MSI-X: mask the table entry, then zero its address and data.
 * Called with interrupts disabled; must not block (see comment above).
 */
void
apic_pci_msi_unconfigure(dev_info_t *rdip, int type, int inum)
{
	ushort_t msi_ctrl;
	int cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	ASSERT((handle != NULL) && (cap_ptr != 0));

	if (type == DDI_INTR_TYPE_MSI) {
		/* Clear MME (multiple message enable) in the control word. */
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		msi_ctrl &= (~PCI_MSI_MME_MASK);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
		pci_config_put32(handle, cap_ptr + PCI_MSI_ADDR_OFFSET, 0);

		/* The data register's offset depends on 64-bit capability. */
		if (msi_ctrl &  PCI_MSI_64BIT_MASK) {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, 0);
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, 0);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, 0);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t off;
		uint32_t mask;
		ddi_intr_msix_t *msix_p = i_ddi_get_msix(rdip);

		ASSERT(msix_p != NULL);

		/* Offset into "inum"th entry in the MSI-X table & mask it */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;

		mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);

		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask | 1));

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		/* Reset the "data" and "addr" bits */
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), 0);
		ddi_put64(msix_p->msix_tbl_hdl, (uint64_t *)off, 0);
	}
}
1703 1662
1704 1663 /*
1705 1664 * apic_pci_msi_disable_mode:
1706 1665 */
1707 1666 void
1708 1667 apic_pci_msi_disable_mode(dev_info_t *rdip, int type)
1709 1668 {
1710 1669 ushort_t msi_ctrl;
1711 1670 int cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
1712 1671 ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);
1713 1672
1714 1673 ASSERT((handle != NULL) && (cap_ptr != 0));
1715 1674
1716 1675 if (type == DDI_INTR_TYPE_MSI) {
1717 1676 msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
1718 1677 if (!(msi_ctrl & PCI_MSI_ENABLE_BIT))
1719 1678 return;
1720 1679
1721 1680 msi_ctrl &= ~PCI_MSI_ENABLE_BIT; /* MSI disable */
1722 1681 pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
1723 1682
1724 1683 } else if (type == DDI_INTR_TYPE_MSIX) {
1725 1684 msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
1726 1685 if (msi_ctrl & PCI_MSIX_ENABLE_BIT) {
1727 1686 msi_ctrl &= ~PCI_MSIX_ENABLE_BIT;
1728 1687 pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
1729 1688 msi_ctrl);
1730 1689 }
1731 1690 }
1732 1691 }
1733 1692
/* Return the local APIC id recorded for the given cpuid. */
uint32_t
apic_get_localapicid(uint32_t cpuid)
{
	ASSERT(cpuid < apic_nproc && apic_cpus != NULL);

	return (apic_cpus[cpuid].aci_local_id);
}
1741 1700
/* Return the APIC id of the I/O APIC at the given index. */
uchar_t
apic_get_ioapicid(uchar_t ioapicindex)
{
	ASSERT(ioapicindex < MAX_IO_APIC);

	return (apic_io_id[ioapicindex]);
}
↓ open down ↓ |
241 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX