228 hpet_t hpet;
229
230 #endif /* ifndef __xpv */
231
232 uint_t cp_haltset_fanout = 0;
233
234 /*ARGSUSED*/
235 int
236 pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
237 {
238 switch (hw) {
239 case PGHW_IPIPE:
240 if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
241 /*
242 * Hyper-threading is SMT
243 */
244 return (1);
245 } else {
246 return (0);
247 }
248 case PGHW_PROCNODE:
249 if (cpuid_get_procnodes_per_pkg(cp) > 1)
250 return (1);
251 else
252 return (0);
253 case PGHW_CHIP:
254 if (is_x86_feature(x86_featureset, X86FSET_CMP) ||
255 is_x86_feature(x86_featureset, X86FSET_HTT))
256 return (1);
257 else
258 return (0);
259 case PGHW_CACHE:
260 if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
261 return (1);
262 else
263 return (0);
264 case PGHW_POW_ACTIVE:
265 if (cpupm_domain_id(cp, CPUPM_DTYPE_ACTIVE) != (id_t)-1)
266 return (1);
267 else
289 pgp_b = pg_plat_hw_instance_id(cpu_b, hw);
290
291 if (pgp_a == -1 || pgp_b == -1)
292 return (-1);
293
294 return (pgp_a == pgp_b);
295 }
296
297 /*
298 * Return a physical instance identifier for known hardware sharing
299 * relationships
300 */
301 id_t
302 pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
303 {
304 switch (hw) {
305 case PGHW_IPIPE:
306 return (cpuid_get_coreid(cpu));
307 case PGHW_CACHE:
308 return (cpuid_get_last_lvl_cacheid(cpu));
309 case PGHW_PROCNODE:
310 return (cpuid_get_procnodeid(cpu));
311 case PGHW_CHIP:
312 return (cpuid_get_chipid(cpu));
313 case PGHW_POW_ACTIVE:
314 return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE));
315 case PGHW_POW_IDLE:
316 return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE));
317 default:
318 return (-1);
319 }
320 }
321
322 /*
323 * Express preference for optimizing for sharing relationship
324 * hw1 vs hw2
325 */
326 pghw_type_t
327 pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
328 {
329 int i, rank1, rank2;
330
331 static pghw_type_t hw_hier[] = {
332 PGHW_IPIPE,
333 PGHW_CACHE,
334 PGHW_PROCNODE,
335 PGHW_CHIP,
336 PGHW_POW_IDLE,
337 PGHW_POW_ACTIVE,
338 PGHW_NUM_COMPONENTS
339 };
340
341 for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
342 if (hw_hier[i] == hw1)
343 rank1 = i;
344 if (hw_hier[i] == hw2)
345 rank2 = i;
346 }
347
348 if (rank1 > rank2)
349 return (hw1);
350 else
351 return (hw2);
352 }
353
354 /*
355 * Override the default CMT dispatcher policy for the specified
356 * hardware sharing relationship
357 */
358 pg_cmt_policy_t
359 pg_plat_cmt_policy(pghw_type_t hw)
360 {
361 /*
362 * For shared caches, also load balance across them to
363 * maximize aggregate cache capacity
364 */
365 switch (hw) {
366 case PGHW_CACHE:
367 return (CMT_BALANCE|CMT_AFFINITY);
368 default:
369 return (CMT_NO_POLICY);
370 }
371 }
372
373 id_t
374 pg_plat_get_core_id(cpu_t *cpu)
375 {
376 return ((id_t)cpuid_get_coreid(cpu));
377 }
378
/*
 * Set nosteal_nsec, the interval consulted by disp_getbest() when it
 * decides whether to steal a runnable thread from another CPU.
 */
void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}
385
|
228 hpet_t hpet;
229
230 #endif /* ifndef __xpv */
231
232 uint_t cp_haltset_fanout = 0;
233
234 /*ARGSUSED*/
235 int
236 pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
237 {
238 switch (hw) {
239 case PGHW_IPIPE:
240 if (is_x86_feature(x86_featureset, X86FSET_HTT)) {
241 /*
242 * Hyper-threading is SMT
243 */
244 return (1);
245 } else {
246 return (0);
247 }
248 case PGHW_FPU:
249 if (cpuid_get_cores_per_compunit(cp) > 1)
250 return (1);
251 else
252 return (0);
253 case PGHW_PROCNODE:
254 if (cpuid_get_procnodes_per_pkg(cp) > 1)
255 return (1);
256 else
257 return (0);
258 case PGHW_CHIP:
259 if (is_x86_feature(x86_featureset, X86FSET_CMP) ||
260 is_x86_feature(x86_featureset, X86FSET_HTT))
261 return (1);
262 else
263 return (0);
264 case PGHW_CACHE:
265 if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
266 return (1);
267 else
268 return (0);
269 case PGHW_POW_ACTIVE:
270 if (cpupm_domain_id(cp, CPUPM_DTYPE_ACTIVE) != (id_t)-1)
271 return (1);
272 else
294 pgp_b = pg_plat_hw_instance_id(cpu_b, hw);
295
296 if (pgp_a == -1 || pgp_b == -1)
297 return (-1);
298
299 return (pgp_a == pgp_b);
300 }
301
302 /*
303 * Return a physical instance identifier for known hardware sharing
304 * relationships
305 */
306 id_t
307 pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
308 {
309 switch (hw) {
310 case PGHW_IPIPE:
311 return (cpuid_get_coreid(cpu));
312 case PGHW_CACHE:
313 return (cpuid_get_last_lvl_cacheid(cpu));
314 case PGHW_FPU:
315 return (cpuid_get_compunitid(cpu));
316 case PGHW_PROCNODE:
317 return (cpuid_get_procnodeid(cpu));
318 case PGHW_CHIP:
319 return (cpuid_get_chipid(cpu));
320 case PGHW_POW_ACTIVE:
321 return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE));
322 case PGHW_POW_IDLE:
323 return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE));
324 default:
325 return (-1);
326 }
327 }
328
329 /*
330 * Express preference for optimizing for sharing relationship
331 * hw1 vs hw2
332 */
333 pghw_type_t
334 pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
335 {
336 int i, rank1, rank2;
337
338 static pghw_type_t hw_hier[] = {
339 PGHW_IPIPE,
340 PGHW_CACHE,
341 PGHW_FPU,
342 PGHW_PROCNODE,
343 PGHW_CHIP,
344 PGHW_POW_IDLE,
345 PGHW_POW_ACTIVE,
346 PGHW_NUM_COMPONENTS
347 };
348
349 for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
350 if (hw_hier[i] == hw1)
351 rank1 = i;
352 if (hw_hier[i] == hw2)
353 rank2 = i;
354 }
355
356 if (rank1 > rank2)
357 return (hw1);
358 else
359 return (hw2);
360 }
361
362 /*
363 * Override the default CMT dispatcher policy for the specified
364 * hardware sharing relationship
365 */
366 pg_cmt_policy_t
367 pg_plat_cmt_policy(pghw_type_t hw)
368 {
369 /*
370 * For shared caches, also load balance across them to
371 * maximize aggregate cache capacity
372 *
373 * On AMD family 0x15 CPUs, cores come in pairs called
374 * compute units, sharing the FPU and the I$ and L2
375 * caches. Use balancing and cache affinity.
376 */
377 switch (hw) {
378 case PGHW_FPU:
379 case PGHW_CACHE:
380 return (CMT_BALANCE|CMT_AFFINITY);
381 default:
382 return (CMT_NO_POLICY);
383 }
384 }
385
386 id_t
387 pg_plat_get_core_id(cpu_t *cpu)
388 {
389 return ((id_t)cpuid_get_coreid(cpu));
390 }
391
/*
 * Set nosteal_nsec, the interval consulted by disp_getbest() when it
 * decides whether to steal a runnable thread from another CPU.
 */
void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}
398
|